/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <ppc/asm.h>
#include <ppc/proc_reg.h>
#include <assym.s>
#include <debug.h>
#include <mach/ppc/vm_param.h>
#include <ppc/exception.h>
/*
 * ml_set_physical() -- turn off DR and (if 64-bit) turn SF on
 *	it is assumed that pf64Bit is already in cr6
 * ml_set_physical_get_ffs() -- turn DR off, SF on, and get feature flags
 * ml_set_physical_disabled() -- turn DR and EE off, SF on, get feature flags
 * ml_set_translation_off() -- turn DR, IR, and EE off, SF on, get feature flags
 *
 * Callable only from assembler, these return:
 *	r2  -- new MSR
 *	r11 -- old MSR
 *	r10 -- feature flags (pf64Bit etc, ie SPRG 2)
 *	cr6 -- feature flags 24-27, ie pf64Bit, pf128Byte, and pf32Byte
 *
 * Uses r0 and r2.  ml_set_translation_off also uses r3 and cr5.
 */

			.align	4
			.globl	EXT(ml_set_translation_off)
LEXT(ml_set_translation_off)
			mfsprg	r10,2							// get feature flags
			li		r0,0							; Clear this
			mtcrf	0x02,r10						// move pf64Bit etc to cr6
			ori		r0,r0,lo16(MASK(MSR_EE)+MASK(MSR_FP)+MASK(MSR_IR)+MASK(MSR_DR))	// turn off all 4
			mfmsr	r11								// get MSR
			oris	r0,r0,hi16(MASK(MSR_VEC))		// Turn off vector too
			mtcrf	0x04,r10						// move pfNoMSRir etc to cr5
			andc	r2,r11,r0						// turn off EE, IR, and DR
			bt++	pf64Bitb,ml_set_physical_64		// skip if 64-bit (only they take the hint)
			bf		pfNoMSRirb,ml_set_physical_32	// skip if we can load MSR directly
			li		r0,loadMSR						// Get the MSR setter SC
			mr		r3,r2							// copy new MSR to r2
			sc										// Set it
			blr

			.align	4
			.globl	EXT(ml_set_physical_disabled)

LEXT(ml_set_physical_disabled)
			li		r0,0							; Clear
			mfsprg	r10,2							// get feature flags
			ori		r0,r0,lo16(MASK(MSR_EE))		// turn EE and fp off
			mtcrf	0x02,r10						// move pf64Bit etc to cr6
			b		ml_set_physical_join

			.align	5
			.globl	EXT(ml_set_physical_get_ffs)

LEXT(ml_set_physical_get_ffs)
			mfsprg	r10,2							// get feature flags
			mtcrf	0x02,r10						// move pf64Bit etc to cr6

			.globl	EXT(ml_set_physical)
LEXT(ml_set_physical)

			li		r0,0							// do not turn off interrupts

ml_set_physical_join:
			oris	r0,r0,hi16(MASK(MSR_VEC))		// Always gonna turn off vectors
			mfmsr	r11								// get MSR
			ori		r0,r0,lo16(MASK(MSR_DR)+MASK(MSR_FP))	// always turn off DR and FP bit
			andc	r2,r11,r0						// turn off DR and maybe EE
			bt++	pf64Bitb,ml_set_physical_64		// skip if 64-bit (only they take the hint)
ml_set_physical_32:
			mtmsr	r2								// turn off translation
			isync
			blr

ml_set_physical_64:
			li		r0,1							// get a 1 to slam into SF
			rldimi	r2,r0,63,MSR_SF_BIT				// set SF bit (bit 0)
			mtmsrd	r2								// set 64-bit mode, turn off data relocation
			isync									// synchronize
			blr
105
/*
 * ml_restore(old_MSR)
 *
 * Callable only from assembler, restores the MSR in r11 saved by ml_set_physical.
 * We assume cr6 and r11 are as set by ml_set_physical, ie:
 *	cr6 - pf64Bit flag (feature flags 24-27)
 *	r11 - old MSR
 */

			.align	5
			.globl	EXT(ml_restore)

LEXT(ml_restore)
			bt++	pf64Bitb,ml_restore_64			// handle 64-bit cpus (only they take the hint)
			mtmsr	r11								// restore a 32-bit MSR
			isync
			blr

ml_restore_64:
			mtmsrd	r11								// restore a 64-bit MSR
			isync
			blr
129
/* PCI config cycle probing
 *
 *	boolean_t ml_probe_read(vm_offset_t paddr, unsigned int *val)
 *
 *	Read the memory location at physical address paddr.
 *	This is a part of a device probe, so there is a good chance we will
 *	have a machine check here. So we have to be able to handle that.
 *	We assume that machine checks are enabled both in MSR and HIDs.
 *	Returns 1 in r3 on success and stores the value through r4.
 */

;			Force a line boundry here
			.align	5
			.globl	EXT(ml_probe_read)

LEXT(ml_probe_read)

			mfsprg	r9,2							; Get feature flags

			rlwinm.	r0,r9,0,pf64Bitb,pf64Bitb		; Are we on a 64-bit machine?
			rlwinm	r3,r3,0,0,31					; Clean up for 64-bit machines
			bne++	mpr64bit						; Go do this the 64-bit way...

mpr32bit:	lis		r8,hi16(MASK(MSR_VEC))			; Get the vector flag
			mfmsr	r0								; Save the current MSR
			ori		r8,r8,lo16(MASK(MSR_FP))		; Add the FP flag

			neg		r10,r3							; Number of bytes to end of page
			andc	r0,r0,r8						; Clear VEC and FP
			rlwinm.	r10,r10,0,20,31					; Clear excess junk and test for page bndry
			ori		r8,r8,lo16(MASK(MSR_EE)|MASK(MSR_IR)|MASK(MSR_DR))	; Drop EE, IR, and DR
			mr		r12,r3							; Save the load address
			andc	r2,r0,r8						; Clear VEC, FP, and EE
			mtcrf	0x04,r9							; Set the features
			cmplwi	cr1,r10,4						; At least 4 bytes left in page?
			beq-	mprdoit							; We are right on the boundary...
			li		r3,0
			bltlr-	cr1								; No, just return failure...

mprdoit:

			bt		pfNoMSRirb,mprNoMSR				; No MSR...

			mtmsr	r2								; Translation and all off
			isync									; Toss prefetch
			b		mprNoMSRx

mprNoMSR:
			mr		r5,r0
			li		r0,loadMSR						; Get the MSR setter SC
			mr		r3,r2							; Get new MSR
			sc										; Set it
			mr		r0,r5
			li		r3,0
mprNoMSRx:

			mfspr	r6, hid0						; Get a copy of hid0

			rlwinm.	r5, r9, 0, pfNoMuMMCKb, pfNoMuMMCKb	; Check for NoMuMMCK
			bne		mprNoMuM

			rlwinm	r5, r6, 0, ice+1, ice-1			; Turn off L1 I-Cache
			mtspr	hid0, r5
			isync									; Wait for I-Cache off
			rlwinm	r5, r6, 0, mum+1, mum-1			; Turn off MuM w/ I-Cache on
			mtspr	hid0, r5
mprNoMuM:

;
;			We need to insure that there is no more than 1 BAT register that
;			can get a hit. There could be repercussions beyond the ken
;			of mortal man. It is best not to tempt fate.
;

;			Note: we will reload these from the shadow BATs later

			li		r10,0							; Clear a register

			sync									; Make sure all is well

			mtdbatu	1,r10							; Invalidate DBAT 1
			mtdbatu	2,r10							; Invalidate DBAT 2
			mtdbatu	3,r10							; Invalidate DBAT 3

			rlwinm	r10,r12,0,0,14					; Round down to a 128k boundary
			ori		r11,r10,0x32					; Set uncached, coherent, R/W
			ori		r10,r10,2						; Make the upper half (128k, valid supervisor)
			mtdbatl	0,r11							; Set lower BAT first
			mtdbatu	0,r10							; Now the upper
			sync									; Just make sure

			dcbf	0,r12							; Make sure we kill the cache to avoid paradoxes
			sync

			ori		r11,r2,lo16(MASK(MSR_DR))		; Turn on data translation
			mtmsr	r11								; Do it for real
			isync									; Make sure of it

			eieio									; Make sure of all previous accesses
			sync									; Make sure it is all caught up

			lwz		r11,0(r12)						; Get it and maybe machine check here

			eieio									; Make sure of ordering again
			sync									; Get caught up yet again
			isync									; Do not go further till we are here

			mtmsr	r2								; Turn translation back off
			isync

			lis		r10,hi16(EXT(shadow_BAT)+shdDBAT)	; Get shadow address
			ori		r10,r10,lo16(EXT(shadow_BAT)+shdDBAT)	; Get shadow address

			lwz		r5,0(r10)						; Pick up DBAT 0 high
			lwz		r6,4(r10)						; Pick up DBAT 0 low
			lwz		r7,8(r10)						; Pick up DBAT 1 high
			lwz		r8,16(r10)						; Pick up DBAT 2 high
			lwz		r9,24(r10)						; Pick up DBAT 3 high

			mtdbatu	0,r5							; Restore DBAT 0 high
			mtdbatl	0,r6							; Restore DBAT 0 low
			mtdbatu	1,r7							; Restore DBAT 1 high
			mtdbatu	2,r8							; Restore DBAT 2 high
			mtdbatu	3,r9							; Restore DBAT 3 high
			sync

			li		r3,1							; We made it

			mtmsr	r0								; Restore translation and exceptions
			isync									; Toss speculations

			stw		r11,0(r4)						; Save the loaded value
			blr										; Return...

;			Force a line boundry here. This means we will be able to check addresses better
			.align	5
			.globl	EXT(ml_probe_read_mck)
LEXT(ml_probe_read_mck)
/* PCI config cycle probing - 64-bit
 *
 *	boolean_t ml_probe_read_64(addr64_t paddr, unsigned int *val)
 *
 *	Read the memory location at physical address paddr.
 *	This is a part of a device probe, so there is a good chance we will
 *	have a machine check here. So we have to be able to handle that.
 *	We assume that machine checks are enabled both in MSR and HIDs.
 */

;			Force a line boundry here
			.align	6
			.globl	EXT(ml_probe_read_64)

LEXT(ml_probe_read_64)

			mfsprg	r9,2							; Get feature flags
			rlwinm	r3,r3,0,1,0						; Copy low 32 bits to top 32
			rlwinm.	r0,r9,0,pf64Bitb,pf64Bitb		; Are we on a 64-bit machine?
			rlwimi	r3,r4,0,0,31					; Insert low part of 64-bit address in bottom 32 bits

			mr		r4,r5							; Move result to common register
			beq--	mpr32bit						; Go do this the 32-bit way...

mpr64bit:	andi.	r0,r3,3							; Check if we are on a word boundary
			li		r0,0							; Clear the EE bit (and everything else for that matter)
			bne--	mprFail							; Boundary not good...
			mfmsr	r11								; Get the MSR
			mtmsrd	r0,1							; Set the EE bit only (do not care about RI)
			rlwinm	r11,r11,0,MSR_EE_BIT,MSR_EE_BIT	; Isolate just the EE bit
			mfmsr	r10								; Refresh our view of the MSR (VMX/FP may have changed)
			or		r12,r10,r11						; Turn on EE if on before we turned it off
			ori		r0,r0,lo16(MASK(MSR_IR)|MASK(MSR_DR))	; Get the IR and DR bits
			li		r2,1							; Get a 1
			sldi	r2,r2,63						; Get the 64-bit bit
			andc	r10,r10,r0						; Clear IR and DR
			or		r10,r10,r2						; Set 64-bit

			li		r0,1							; Get a 1
			mtmsrd	r10								; Translation and EE off, 64-bit on
			isync

			sldi	r0,r0,32+8						; Get the right bit to inhibit caching

			mfspr	r8,hid4							; Get HID4
			or		r2,r8,r0						; Set bit to make real accesses cache-inhibited
			sync									; Sync up
			mtspr	hid4,r2							; Make real accesses cache-inhibited
			isync									; Toss prefetches

			lis		r7,0xE000						; Get the unlikeliest ESID possible
			srdi	r7,r7,1							; Make 0x7FFFFFFFF0000000
			slbie	r7								; Make sure the ERAT is cleared

			sync
			isync

			eieio									; Make sure of all previous accesses

			lwz		r11,0(r3)						; Get it and maybe machine check here

			eieio									; Make sure of ordering again
			sync									; Get caught up yet again
			isync									; Do not go further till we are here

			sync									; Sync up
			mtspr	hid4,r8							; Make real accesses not cache-inhibited
			isync									; Toss prefetches

			lis		r7,0xE000						; Get the unlikeliest ESID possible
			srdi	r7,r7,1							; Make 0x7FFFFFFFF0000000
			slbie	r7								; Make sure the ERAT is cleared

			mtmsrd	r12								; Restore entry MSR
			isync

			stw		r11,0(r4)						; Pass back the result
			li		r3,1							; Indicate success
			blr										; Leave...

mprFail:	li		r3,0							; Set failure
			blr										; Leave...

;			Force a line boundry here. This means we will be able to check addresses better
			.align	6
			.globl	EXT(ml_probe_read_mck_64)
LEXT(ml_probe_read_mck_64)
/* Read physical address byte
 *
 *	unsigned int ml_phys_read_byte(vm_offset_t paddr)
 *	unsigned int ml_phys_read_byte_64(addr64_t paddr)
 *
 *	Read the byte at physical address paddr. Memory should not be cache inhibited.
 */

;			Force a line boundry here

			.align	5
			.globl	EXT(ml_phys_read_byte_64)

LEXT(ml_phys_read_byte_64)

			rlwinm	r3,r3,0,1,0						; Copy low 32 bits to top 32
			rlwimi	r3,r4,0,0,31					; Insert low part of 64-bit address in bottom 32 bits
			b		ml_phys_read_byte_join

			.globl	EXT(ml_phys_read_byte)

LEXT(ml_phys_read_byte)
			rlwinm	r3,r3,0,0,31					; truncate address to 32-bits
ml_phys_read_byte_join:								; r3 = address to read (reg64_t)
			mflr	r11								; Save the return
			bl		rdwrpre							; Get set up, translation/interrupts off, 64-bit on, etc.

			lbz		r3,0(r3)						; Get the byte
			b		rdwrpost						; Clean up and leave...
/* Read physical address half word
 *
 *	unsigned int ml_phys_read_half(vm_offset_t paddr)
 *	unsigned int ml_phys_read_half_64(addr64_t paddr)
 *
 *	Read the half word at physical address paddr. Memory should not be cache inhibited.
 */

;			Force a line boundry here

			.align	5
			.globl	EXT(ml_phys_read_half_64)

LEXT(ml_phys_read_half_64)

			rlwinm	r3,r3,0,1,0						; Copy low 32 bits to top 32
			rlwimi	r3,r4,0,0,31					; Insert low part of 64-bit address in bottom 32 bits
			b		ml_phys_read_half_join

			.globl	EXT(ml_phys_read_half)

LEXT(ml_phys_read_half)
			rlwinm	r3,r3,0,0,31					; truncate address to 32-bits
ml_phys_read_half_join:								; r3 = address to read (reg64_t)
			mflr	r11								; Save the return
			bl		rdwrpre							; Get set up, translation/interrupts off, 64-bit on, etc.

			lhz		r3,0(r3)						; Get the half word
			b		rdwrpost						; Clean up and leave...
/* Read physical address word
 *
 *	unsigned int ml_phys_read(vm_offset_t paddr)
 *	unsigned int ml_phys_read_64(addr64_t paddr)
 *	unsigned int ml_phys_read_word(vm_offset_t paddr)
 *	unsigned int ml_phys_read_word_64(addr64_t paddr)
 *
 *	Read the word at physical address paddr. Memory should not be cache inhibited.
 */

;			Force a line boundry here

			.align	5
			.globl	EXT(ml_phys_read_64)
			.globl	EXT(ml_phys_read_word_64)

LEXT(ml_phys_read_64)
LEXT(ml_phys_read_word_64)

			rlwinm	r3,r3,0,1,0						; Copy low 32 bits to top 32
			rlwimi	r3,r4,0,0,31					; Insert low part of 64-bit address in bottom 32 bits
			b		ml_phys_read_word_join

			.globl	EXT(ml_phys_read)
			.globl	EXT(ml_phys_read_word)

LEXT(ml_phys_read)
LEXT(ml_phys_read_word)
			rlwinm	r3,r3,0,0,31					; truncate address to 32-bits
ml_phys_read_word_join:								; r3 = address to read (reg64_t)
			mflr	r11								; Save the return
			bl		rdwrpre							; Get set up, translation/interrupts off, 64-bit on, etc.

			lwz		r3,0(r3)						; Get the word
			b		rdwrpost						; Clean up and leave...
/* Read physical address double word
 *
 *	unsigned long long ml_phys_read_double(vm_offset_t paddr)
 *	unsigned long long ml_phys_read_double_64(addr64_t paddr)
 *
 *	Read the double word at physical address paddr. Memory should not be cache inhibited.
 */

;			Force a line boundry here

			.align	5
			.globl	EXT(ml_phys_read_double_64)

LEXT(ml_phys_read_double_64)

			rlwinm	r3,r3,0,1,0						; Copy low 32 bits to top 32
			rlwimi	r3,r4,0,0,31					; Insert low part of 64-bit address in bottom 32 bits
			b		ml_phys_read_double_join

			.globl	EXT(ml_phys_read_double)

LEXT(ml_phys_read_double)
			rlwinm	r3,r3,0,0,31					; truncate address to 32-bits
ml_phys_read_double_join:							; r3 = address to read (reg64_t)
			mflr	r11								; Save the return
			bl		rdwrpre							; Get set up, translation/interrupts off, 64-bit on, etc.

			lwz		r4,4(r3)						; Get the low word
			lwz		r3,0(r3)						; Get the high word
			b		rdwrpost						; Clean up and leave...
/* Write physical address byte
 *
 *	void ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
 *	void ml_phys_write_byte_64(addr64_t paddr, unsigned int data)
 *
 *	Write the byte at physical address paddr. Memory should not be cache inhibited.
 */

			.align	5
			.globl	EXT(ml_phys_write_byte_64)

LEXT(ml_phys_write_byte_64)

			rlwinm	r3,r3,0,1,0						; Copy low 32 bits to top 32
			rlwimi	r3,r4,0,0,31					; Insert low part of 64-bit address in bottom 32 bits
			mr		r4,r5							; Copy over the data
			b		ml_phys_write_byte_join

			.globl	EXT(ml_phys_write_byte)

LEXT(ml_phys_write_byte)
			rlwinm	r3,r3,0,0,31					; truncate address to 32-bits
ml_phys_write_byte_join:							; r3 = address to write (reg64_t), r4 = data
			mflr	r11								; Save the return
			bl		rdwrpre							; Get set up, translation/interrupts off, 64-bit on, etc.

			stb		r4,0(r3)						; Set the byte
			b		rdwrpost						; Clean up and leave...
/* Write physical address half word
 *
 *	void ml_phys_write_half(vm_offset_t paddr, unsigned int data)
 *	void ml_phys_write_half_64(addr64_t paddr, unsigned int data)
 *
 *	Write the half word at physical address paddr. Memory should not be cache inhibited.
 */

			.align	5
			.globl	EXT(ml_phys_write_half_64)

LEXT(ml_phys_write_half_64)

			rlwinm	r3,r3,0,1,0						; Copy low 32 bits to top 32
			rlwimi	r3,r4,0,0,31					; Insert low part of 64-bit address in bottom 32 bits
			mr		r4,r5							; Copy over the data
			b		ml_phys_write_half_join

			.globl	EXT(ml_phys_write_half)

LEXT(ml_phys_write_half)
			rlwinm	r3,r3,0,0,31					; truncate address to 32-bits
ml_phys_write_half_join:							; r3 = address to write (reg64_t), r4 = data
			mflr	r11								; Save the return
			bl		rdwrpre							; Get set up, translation/interrupts off, 64-bit on, etc.

			sth		r4,0(r3)						; Set the half word
			b		rdwrpost						; Clean up and leave...
/* Write physical address word
 *
 *	void ml_phys_write(vm_offset_t paddr, unsigned int data)
 *	void ml_phys_write_64(addr64_t paddr, unsigned int data)
 *	void ml_phys_write_word(vm_offset_t paddr, unsigned int data)
 *	void ml_phys_write_word_64(addr64_t paddr, unsigned int data)
 *
 *	Write the word at physical address paddr. Memory should not be cache inhibited.
 */

			.align	5
			.globl	EXT(ml_phys_write_64)
			.globl	EXT(ml_phys_write_word_64)

LEXT(ml_phys_write_64)
LEXT(ml_phys_write_word_64)

			rlwinm	r3,r3,0,1,0						; Copy low 32 bits to top 32
			rlwimi	r3,r4,0,0,31					; Insert low part of 64-bit address in bottom 32 bits
			mr		r4,r5							; Copy over the data
			b		ml_phys_write_word_join

			.globl	EXT(ml_phys_write)
			.globl	EXT(ml_phys_write_word)

LEXT(ml_phys_write)
LEXT(ml_phys_write_word)
			rlwinm	r3,r3,0,0,31					; truncate address to 32-bits
ml_phys_write_word_join:							; r3 = address to write (reg64_t), r4 = data
			mflr	r11								; Save the return
			bl		rdwrpre							; Get set up, translation/interrupts off, 64-bit on, etc.

			stw		r4,0(r3)						; Set the word
			b		rdwrpost						; Clean up and leave...
/* Write physical address double word
 *
 *	void ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
 *	void ml_phys_write_double_64(addr64_t paddr, unsigned long long data)
 *
 *	Write the double word at physical address paddr. Memory should not be cache inhibited.
 */

			.align	5
			.globl	EXT(ml_phys_write_double_64)

LEXT(ml_phys_write_double_64)

			rlwinm	r3,r3,0,1,0						; Copy low 32 bits to top 32
			rlwimi	r3,r4,0,0,31					; Insert low part of 64-bit address in bottom 32 bits
			mr		r4,r5							; Copy over the high data
			mr		r5,r6							; Copy over the low data
			b		ml_phys_write_double_join

			.globl	EXT(ml_phys_write_double)

LEXT(ml_phys_write_double)
			rlwinm	r3,r3,0,0,31					; truncate address to 32-bits
ml_phys_write_double_join:							; r3 = address to write (reg64_t), r4,r5 = data (long long)
			mflr	r11								; Save the return
			bl		rdwrpre							; Get set up, translation/interrupts off, 64-bit on, etc.

			stw		r4,0(r3)						; Set the high word
			stw		r5,4(r3)						; Set the low word
			b		rdwrpost						; Clean up and leave...
;
;			rdwrpre/rdwrpost -- common setup/teardown for the ml_phys_read/write family.
;			rdwrpre: saves the MSR in r10, drops EE/DR (and IR on 64-bit), sets SF on
;			64-bit machines, and (for I/O-space addresses) makes real-mode accesses
;			cache-inhibited via HID4, remembering the old HID4 in r8 and the
;			"changed it" state in cr7.  rdwrpost undoes all of that and returns
;			through r11 (the caller's saved LR).
;

			.align	5

rdwrpre:	mfsprg	r12,2							; Get feature flags
			lis		r8,hi16(MASK(MSR_VEC))			; Get the vector flag
			mfmsr	r10								; Save the MSR
			ori		r8,r8,lo16(MASK(MSR_FP))		; Add the FP flag
			mtcrf	0x02,r12						; move pf64Bit
			andc	r10,r10,r8						; Clear VEC and FP
			ori		r9,r8,lo16(MASK(MSR_EE)|MASK(MSR_IR)|MASK(MSR_DR))	; Drop EE, DR, and IR
			li		r2,1							; Prepare for 64 bit
			andc	r9,r10,r9						; Clear VEC, FP, DR, and EE
			bf--	pf64Bitb,rdwrpre32				; Join 32-bit code...

			srdi	r7,r3,31						; Get a 1 if address is in I/O memory
			rldimi	r9,r2,63,MSR_SF_BIT				; set SF bit (bit 0)
			cmpldi	cr7,r7,1						; Is source in I/O memory?
			mtmsrd	r9								; set 64-bit mode, turn off EE, DR, and IR
			isync									; synchronize

			sldi	r0,r2,32+8						; Get the right bit to turn off caching

			bnelr++	cr7								; We are not in the I/O area, all ready...

			mfspr	r8,hid4							; Get HID4
			or		r2,r8,r0						; Set bit to make real accesses cache-inhibited
			sync									; Sync up
			mtspr	hid4,r2							; Make real accesses cache-inhibited
			isync									; Toss prefetches

			lis		r7,0xE000						; Get the unlikeliest ESID possible
			srdi	r7,r7,1							; Make 0x7FFFFFFFF0000000
			slbie	r7								; Make sure the ERAT is cleared

			sync
			isync
			blr										; Finally, all ready...

			.align	5

rdwrpre32:	rlwimi	r9,r10,0,MSR_IR_BIT,MSR_IR_BIT	; Leave the IR bit unchanged
			mtmsr	r9								; Drop EE, DR, and leave IR unchanged
			isync
			blr										; All set up, leave...

			.align	5

rdwrpost:	mtlr	r11								; Restore the return
			bt++	pf64Bitb,rdwrpost64				; Join 64-bit code...

			mtmsr	r10								; Restore entry MSR (sans FP and VEC)
			isync
			blr										; Leave...

rdwrpost64:	bne++	cr7,rdwrpcok					; Skip enabling real mode caching if we did not change it...

			sync									; Sync up
			mtspr	hid4,r8							; Make real accesses not cache-inhibited
			isync									; Toss prefetches

			lis		r7,0xE000						; Get the unlikeliest ESID possible
			srdi	r7,r7,1							; Make 0x7FFFFFFFF0000000
			slbie	r7								; Make sure the ERAT is cleared

rdwrpcok:	mtmsrd	r10								; Restore entry MSR (sans FP and VEC)
			isync
			blr										; Leave...
/* set interrupts enabled or disabled
 *
 *	boolean_t ml_set_interrupts_enabled(boolean_t enable)
 *
 *	Set EE bit to "enable" and return old value as boolean.
 *	When enabling, checks for a pending urgent AST and preempts if one
 *	is pending and preemption is not masked.
 */

;			Force a line boundry here
			.align	5
			.globl	EXT(ml_set_interrupts_enabled)

LEXT(ml_set_interrupts_enabled)

			andi.	r4,r3,1							; Are we turning interruptions on?
			lis		r0,hi16(MASK(MSR_VEC))			; Get vector enable
			mfmsr	r5								; Get the current MSR
			ori		r0,r0,lo16(MASK(MSR_EE)|MASK(MSR_FP))	; Get float enable and EE enable
			rlwinm	r3,r5,17,31,31					; Set return value
			andc	r5,r5,r0						; Force VEC and FP off
			bne		CheckPreemption					; Interrupts going on, check ASTs...

			mtmsr	r5								; Slam disable (always going disabled here)
			isync									; Need this because FP/Vec might go off
			blr

			.align	5

CheckPreemption:
			mfsprg	r9,1							; Get current activation
			lwz		r7,ACT_PER_PROC(r9)				; Get the per_proc block
			ori		r5,r5,lo16(MASK(MSR_EE))		; Turn on the enable
			lwz		r8,PP_PENDING_AST(r7)			; Get pending AST mask
			li		r6,AST_URGENT					; Get the type we will preempt for
			lwz		r7,ACT_PREEMPT_CNT(r9)			; Get preemption count
			lis		r0,hi16(DoPreemptCall)			; High part of Preempt FW call
			cmpwi	cr1,r7,0						; Are preemptions masked off?
			and.	r8,r8,r6						; Are we urgent?
			crorc	cr1_eq,cr0_eq,cr1_eq			; Remember if preemptions are masked or not urgent
			ori		r0,r0,lo16(DoPreemptCall)		; Bottom of FW call

			mtmsr	r5								; Restore the MSR now, before we can preempt
			isync									; Need this because FP/Vec might go off

			beqlr++	cr1								; Return if no preemption...
			sc										; Preempt
			blr
;
;			timer_update(timer_t tp, uint32 high, uint32 low)
;			Publishes a new 64-bit timer value.  HIGHCHK is written first and
;			HIGH last, with eieio barriers between the stores, so that a
;			concurrent timer_grab can detect a torn read by comparing HIGH
;			against HIGHCHK.
;

;			Force a line boundry here
			.align	5
			.globl	EXT(timer_update)

LEXT(timer_update)
			stw		r4,TIMER_HIGHCHK(r3)			; Stamp the check word first
			eieio									; Order against the low-word store
			stw		r5,TIMER_LOW(r3)				; Store the low half
			eieio									; Order against the high-word store
			stw		r4,TIMER_HIGH(r3)				; Store the high half last
			blr
;
;			timer_grab(timer_t tp) -- atomically read a 64-bit timer value.
;			Reads HIGH, LOW, then HIGHCHK and retries if HIGH and HIGHCHK
;			disagree (a timer_update ran in between).  Returns high half in
;			r3, low half in r4.
;

;			Force a line boundry here
			.align	5
			.globl	EXT(timer_grab)

LEXT(timer_grab)
0:			lwz		r11,TIMER_HIGH(r3)				; Get the high word
			lwz		r4,TIMER_LOW(r3)				; Get the low word
			isync									; Force the loads to complete in order
			lwz		r9,TIMER_HIGHCHK(r3)			; Get the check word
			cmpw	r11,r9							; Did an update slip in?
			bne--	0b								; Yes, try again...
			mr		r3,r11
			blr
;
;			timer_event(uint32 tstamp, timer_t new) -- close out the processor's
;			current timer at timestamp r3, accumulate the elapsed time into it
;			(propagating carry into the high word using the HIGHCHK protocol),
;			then make r4 the current timer and stamp it.
;

;			Force a line boundry here
			.align	5
			.globl	EXT(timer_event)

LEXT(timer_event)
			mfsprg	r10,1							; Get the current activation
			lwz		r10,ACT_PER_PROC(r10)			; Get the per_proc block
			addi	r10,r10,PP_PROCESSOR
			lwz		r11,CURRENT_TIMER(r10)

			lwz		r9,TIMER_LOW(r11)				; Current accumulated low word
			lwz		r2,TIMER_TSTAMP(r11)			; Timestamp when this timer started
			add		r0,r9,r3						; Add in the new timestamp
			subf	r5,r2,r0						; New low word = old + (now - start)
			cmplw	r5,r9							; Did the low word wrap?
			bge++	0f

			lwz		r6,TIMER_HIGH(r11)				; Carry into the high word
			addi	r6,r6,1
			stw		r6,TIMER_HIGHCHK(r11)			; Publish via the HIGHCHK protocol
			eieio
			stw		r5,TIMER_LOW(r11)
			eieio
			stw		r6,TIMER_HIGH(r11)
			b		1f

0:			stw		r5,TIMER_LOW(r11)				; No carry, just store the low word

1:			stw		r4,CURRENT_TIMER(r10)			; Switch to the new timer
			stw		r3,TIMER_TSTAMP(r4)				; Stamp it with the event time
			blr
de355530 789
1c79356b
A
790/* Set machine into idle power-saving mode.
791 *
91447636 792 * void machine_idle(void)
1c79356b
A
793 *
794 * We will use the PPC NAP or DOZE for this.
795 * This call always returns. Must be called with spllo (i.e., interruptions
796 * enabled).
797 *
798 */
799
1c79356b
A
800; Force a line boundry here
801 .align 5
91447636 802 .globl EXT(machine_idle)
1c79356b 803
91447636 804LEXT(machine_idle)
1c79356b 805
91447636
A
806 mfsprg r12,1 ; Get the current activation
807 lwz r12,ACT_PER_PROC(r12) ; Get the per_proc block
808 lhz r10,PP_CPU_FLAGS(r12) ; Get the flags
809 lwz r11,PP_INTS_ENABLED(r12) ; Get interrupt enabled state
810 andi. r10,r10,SignalReady ; Are Signal ready?
811 cmpwi cr1,r11,0 ; Are interrupt disabled?
812 cror cr0_eq, cr1_eq, cr0_eq ; Interrupt disabled or Signal not ready?
55e303ae 813 mfmsr r3 ; Save the MSR
91447636
A
814
815 beq-- nonap ; Yes, return after re-enabling interrupts
816 lis r0,hi16(MASK(MSR_VEC)) ; Get the vector flag
55e303ae
A
817 ori r0,r0,lo16(MASK(MSR_FP)) ; Add the FP flag
818 andc r3,r3,r0 ; Clear VEC and FP
819 ori r0,r0,lo16(MASK(MSR_EE)) ; Drop EE also
820 andc r5,r3,r0 ; Clear VEC, FP, DR, and EE
821
1c79356b 822 mtmsr r5 ; Hold up interruptions for now
9bccf70c 823 isync ; May have messed with fp/vec
de355530 824 mfsprg r11,2 ; Get CPU specific features
55e303ae 825 mfspr r6,hid0 ; Get the current power-saving mode
1c79356b
A
826 mtcrf 0xC7,r11 ; Get the facility flags
827
828 lis r4,hi16(napm) ; Assume we can nap
829 bt pfWillNapb,yesnap ; Yeah, nap is ok...
830
831 lis r4,hi16(dozem) ; Assume we can doze
832 bt pfCanDozeb,yesnap ; We can sleep or doze one this machine...
5353443c 833
91447636 834nonap: ori r3,r3,lo16(MASK(MSR_EE)) ; Flip on EE
5353443c 835
1c79356b
A
836 mtmsr r3 ; Turn interruptions back on
837 blr ; Leave...
838
839yesnap: mftbu r9 ; Get the upper timebase
840 mftb r7 ; Get the lower timebase
841 mftbu r8 ; Get the upper one again
842 cmplw r9,r8 ; Did the top tick?
5353443c 843 bne-- yesnap ; Yeah, need to get it again...
1c79356b
A
844 stw r8,napStamp(r12) ; Set high order time stamp
845 stw r7,napStamp+4(r12) ; Set low order nap stamp
d52fe63f 846
5eebf738 847 rlwinm. r0,r11,0,pfAltivecb,pfAltivecb ; Do we have altivec?
91447636 848 beq-- minovec ; No...
5eebf738
A
849 dssall ; Stop the streams before we nap/doze
850 sync
851 lwz r8,napStamp(r12) ; Reload high order time stamp
852clearpipe:
853 cmplw r8,r8
5353443c 854 bne- clearpipe
5eebf738 855 isync
5eebf738 856
91447636
A
857minovec: rlwinm. r7,r11,0,pfNoL2PFNapb,pfNoL2PFNapb ; Turn off L2 Prefetch before nap?
858 beq++ miL2PFok
9bccf70c
A
859
860 mfspr r7,msscr0 ; Get currect MSSCR0 value
55e303ae 861 rlwinm r7,r7,0,0,l2pfes-1 ; Disable L2 Prefetch
9bccf70c
A
862 mtspr msscr0,r7 ; Updates MSSCR0 value
863 sync
864 isync
865
91447636
A
866miL2PFok:
867 rlwinm. r7,r11,0,pfSlowNapb,pfSlowNapb ; Should nap at slow speed?
868 beq minoslownap
9bccf70c
A
869
870 mfspr r7,hid1 ; Get current HID1 value
55e303ae 871 oris r7,r7,hi16(hid1psm) ; Select PLL1
9bccf70c
A
872 mtspr hid1,r7 ; Update HID1 value
873
d52fe63f 874
1c79356b
A
875;
876; We have to open up interruptions here because book 4 says that we should
5353443c 877; turn on only the POW bit and that we should have interrupts enabled.
1c79356b
A
878; The interrupt handler will detect that nap or doze is set if an interrupt
879; is taken and set everything up to return directly to machine_idle_ret.
880; So, make sure everything we need there is already set up...
881;
55e303ae 882
5353443c 883minoslownap:
ab86ba33 884 lis r10,hi16(dozem|napm|sleepm) ; Mask of power management bits
55e303ae
A
885
886 bf-- pf64Bitb,mipNSF1 ; skip if 32-bit...
887
888 sldi r4,r4,32 ; Position the flags
889 sldi r10,r10,32 ; Position the masks
890
5353443c
A
891mipNSF1: li r2,lo16(MASK(MSR_DR)|MASK(MSR_IR)) ; Get the translation mask
892 andc r6,r6,r10 ; Clean up the old power bits
893 ori r7,r5,lo16(MASK(MSR_EE)) ; Flip on EE to make exit msr
894 andc r5,r5,r2 ; Clear IR and DR from current MSR
1c79356b 895 or r6,r6,r4 ; Set nap or doze
5353443c
A
896 ori r5,r5,lo16(MASK(MSR_EE)) ; Flip on EE to make nap msr
897 oris r2,r5,hi16(MASK(MSR_POW)) ; Turn on power management in next MSR
55e303ae
A
898
899 sync
1c79356b 900 mtspr hid0,r6 ; Set up the HID for nap/doze
55e303ae
A
901 mfspr r6,hid0 ; Yes, this is silly, keep it here
902 mfspr r6,hid0 ; Yes, this is a duplicate, keep it here
903 mfspr r6,hid0 ; Yes, this is a duplicate, keep it here
904 mfspr r6,hid0 ; Yes, this is a duplicate, keep it here
905 mfspr r6,hid0 ; Yes, this is a duplicate, keep it here
906 mfspr r6,hid0 ; Yes, this is a duplicate, keep it here
1c79356b 907 isync ; Make sure it is set
1c79356b 908
91447636 909
5353443c
A
910;
911; Turn translation off to nap
912;
913
914 bt pfNoMSRirb,miNoMSR ; Jump if we need to use SC for this...
915 mtmsr r5 ; Turn translation off, interrupts on
916 isync ; Wait for it
917 b miNoMSRx ; Jump back in line...
4a249263 918
5353443c
A
919miNoMSR: mr r3,r5 ; Pass in the new MSR value
920 li r0,loadMSR ; MSR setter ultrafast
921 sc ; Do it to it like you never done before...
922
923miNoMSRx: bf-- pf64Bitb,mipowloop ; skip if 32-bit...
924
925 li r3,0x10 ; Fancy nap threshold is 0x10 ticks
4a249263
A
926 mftb r8 ; Get the low half of the time base
927 mfdec r4 ; Get the decrementer ticks
5353443c 928 cmplw r4,r3 ; Less than threshold?
4a249263
A
929 blt mipowloop
930
5353443c 931 mtdec r3 ; Load decrementer with threshold
4a249263
A
932 isync ; and make sure,
933 mfdec r3 ; really sure, it gets there
934
5353443c 935 rlwinm r6,r2,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear out the EE bit
4a249263
A
936 sync ; Make sure queues are clear
937 mtmsr r6 ; Set MSR with EE off but POW on
1c79356b 938 isync ; Make sure this takes before we proceed
4a249263
A
939
940 mftb r9 ; Get the low half of the time base
941 sub r9,r9,r8 ; Get the number of ticks spent waiting
942 sub r4,r4,r9 ; Adjust the decrementer value
943
5353443c 944 mtdec r4 ; Load decrementer with the rest of the timeout
4a249263
A
945 isync ; and make sure,
946 mfdec r4 ; really sure, it gets there
947
948mipowloop:
949 sync ; Make sure queues are clear
5353443c 950 mtmsr r2 ; Nap or doze, MSR with POW, EE set, translation off
4a249263
A
951 isync ; Make sure this takes before we proceed
952 b mipowloop ; loop if POW does not take
953
1c79356b
A
954;
955; Note that the interrupt handler will turn off the nap/doze bits in the hid.
956; Also remember that the interrupt handler will force return to here whenever
957; the nap/doze bits are set.
958;
;
;	machine_idle_ret -- resume point after nap/doze.
;	The interrupt handler clears the nap/doze bits in the HID and forces a
;	return to here whenever those bits were set.
;	Entry:	r7 = MSR value to restore (set up by the machine_idle path)
;
		.globl	EXT(machine_idle_ret)
LEXT(machine_idle_ret)
		mtmsr	r7				; Make sure the MSR is what we want
		isync					; In case we turn on translation
;
;	Protect against a lost decrementer trap: if the current decrementer value
;	is negative by more than 10 ticks, re-arm it since it is unlikely to fire
;	at this point... A hardware interrupt got us out of machine_idle and may
;	also be contributing to this state.
;
		mfdec	r6				; Get decrementer
		cmpwi	cr0,r6,-10			; Compare decrementer with -10
		bgelr++					; Return if greater
		li	r0,1				; Load 1
		mtdec	r0				; Set decrementer to 1
		blr					; Return...
974
/* Put machine to sleep.
 * This call never returns. We always exit sleep via a soft reset.
 * All external interruptions must be drained at this point and disabled.
 *
 * void ml_ppc_do_sleep(void)
 *
 * We will use the PPC SLEEP for this.
 *
 * There is one bit of hackery in here: we need to enable interruptions
 * when we go to sleep, and there may be a pending decrementer interrupt.
 * So we set the decrementer to 0x7FFFFFFF and then enable interruptions.
 * The decrementer interrupt vector recognizes this case and returns
 * directly back here.
 *
 */
990
; Force a line boundary here
		.align	5
		.globl	EXT(ml_ppc_do_sleep)

LEXT(ml_ppc_do_sleep)

#if 0
		mfmsr	r5				; Hack to spin instead of sleep
		rlwinm	r5,r5,0,MSR_DR_BIT+1,MSR_IR_BIT-1	; Turn off translation
		rlwinm	r5,r5,0,MSR_EE_BIT+1,MSR_EE_BIT-1	; Turn off interruptions
		mtmsr	r5				; No talking
		isync

deadsleep:	addi	r3,r3,1				; Make analyzer happy
		addi	r3,r3,1
		addi	r3,r3,1
		b	deadsleep			; Die the death of 1000 joys...
#endif

		mfsprg	r12,1				; Get the current activation
		lwz	r12,ACT_PER_PROC(r12)		; Get the per_proc block
		mfsprg	r11,2				; Get CPU specific features
		eqv	r10,r10,r10			; Get all foxes (r10 = all ones)
		mtcrf	0x04,r11			; move pfNoMSRirb to cr5
		mfspr	r4,hid0				; Get the current power-saving mode
		mtcrf	0x02,r11			; move pf64Bit to cr6

		rlwinm.	r5,r11,0,pfNoL2PFNapb,pfNoL2PFNapb	; Turn off L2 Prefetch before sleep?
		beq	mpsL2PFok

		mfspr	r5,msscr0			; Get current MSSCR0 value
		rlwinm	r5,r5,0,0,l2pfes-1		; Disable L2 Prefetch
		mtspr	msscr0,r5			; Update MSSCR0 value
		sync
		isync

mpsL2PFok:
		bt++	pf64Bitb,mpsPF64bit		; PM bits are shifted on 64bit systems.

		rlwinm	r4,r4,0,sleep+1,doze-1		; Clear all possible power-saving modes (not DPM though)
		oris	r4,r4,hi16(sleepm)		; Set sleep
		b	mpsClearDEC

mpsPF64bit:
		lis	r5, hi16(dozem|napm|sleepm)	; Clear all possible power-saving modes (not DPM though)
		sldi	r5, r5, 32
		andc	r4, r4, r5
		lis	r5, hi16(napm)			; Set nap (the sleep entry uses the nap bit on 64-bit parts)
		sldi	r5, r5, 32
		or	r4, r4, r5

mpsClearDEC:
		mfmsr	r5				; Get the current MSR
		rlwinm	r10,r10,0,1,31			; Make 0x7FFFFFFF
		mtdec	r10				; Load decrementer with 0x7FFFFFFF
		isync					; and make sure,
		mfdec	r9				; really sure, it gets there

		li	r2,1				; Prepare for 64 bit
		rlwinm	r5,r5,0,MSR_DR_BIT+1,MSR_IR_BIT-1	; Turn off translation
;
;	Note that we need translation off before we set the HID to sleep. Otherwise
;	we will ignore any PTE misses that occur and cause an infinite loop.
;
		bf++	pf64Bitb,mpsCheckMSR		; check 64-bit processor
		rldimi	r5,r2,63,MSR_SF_BIT		; set SF bit (bit 0)
		mtmsrd	r5				; set 64-bit mode, turn off EE, DR, and IR
		isync					; Toss prefetch
		b	mpsNoMSRx

mpsCheckMSR:
		bt	pfNoMSRirb,mpsNoMSR		; No direct MSR access...

		mtmsr	r5				; Translation off
		isync					; Toss prefetch
		b	mpsNoMSRx

mpsNoMSR:
		li	r0,loadMSR			; Get the MSR setter SC
		mr	r3,r5				; Get new MSR
		sc					; Set it
mpsNoMSRx:

		ori	r3,r5,lo16(MASK(MSR_EE))	; Flip on EE
		sync
		mtspr	hid0,r4				; Set up the HID to sleep
		mfspr	r4,hid0				; Read HID0 back repeatedly; some cores need
		mfspr	r4,hid0				;   several back-to-back reads after an HID0
		mfspr	r4,hid0				;   write before the change takes effect
		mfspr	r4,hid0
		mfspr	r4,hid0
		mfspr	r4,hid0

		mtmsr	r3				; Enable for interrupts to drain decrementer

		add	r6,r4,r5			; Just waste time
		add	r6,r6,r4			; A bit more
		add	r6,r6,r5			; A bit more

		mtmsr	r5				; Interruptions back off
		isync					; Toss prefetch

;
;	We are here with translation off, interrupts off, all possible
;	interruptions drained off, and a decrementer that will not pop.
;

		bl	EXT(cacheInit)			; Clear out the caches. This will leave them on
		bl	EXT(cacheDisable)		; Turn off all caches

		mfmsr	r5				; Get the current MSR
		oris	r5,r5,hi16(MASK(MSR_POW))	; Turn on power management in next MSR
							; Leave EE off because power goes off shortly
		mfsprg	r12,0				; Get the per_proc_info
		li	r10,PP_CPU_FLAGS		; Offset of the flags (used for the dcbf below)
		lhz	r11,PP_CPU_FLAGS(r12)		; Get the flags
		ori	r11,r11,SleepState		; Mark SleepState
		sth	r11,PP_CPU_FLAGS(r12)		; Set the flags
		dcbf	r10,r12				; Push the flags out to memory

		mfsprg	r11,2				; Get CPU specific features
		rlwinm.	r0,r11,0,pf64Bitb,pf64Bitb	; Test for 64 bit processor
		eqv	r4,r4,r4			; Get all foxes
		rlwinm	r4,r4,0,1,31			; Make 0x7FFFFFFF
		beq	slSleepNow			; skip if 32-bit...

		li	r3, 0x4000			; Cause decrementer to roll over soon
		mtdec	r3				; Load decrementer with 0x00004000
		isync					; and make sure,
		mfdec	r3				; really sure, it gets there

slSleepNow:
		sync					; Sync it all up
		mtmsr	r5				; Do sleep with interruptions enabled
		isync					; Take a pill
		mtdec	r4				; Load decrementer with 0x7FFFFFFF
		isync					; and make sure,
		mfdec	r3				; really sure, it gets there
		b	slSleepNow			; Go back to sleep if we wake up...
1129
1130
1131
1132/* Initialize all caches including the TLBs
1133 *
1134 * void cacheInit(void)
1135 *
1136 * This is used to force the caches to an initial clean state. First, we
1137 * check if the cache is on, if so, we need to flush the contents to memory.
1138 * Then we invalidate the L1. Next, we configure and invalidate the L2 etc.
1139 * Finally we turn on all of the caches
1140 *
1141 * Note that if translation is not disabled when this is called, the TLB will not
1142 * be completely clear after return.
1143 *
1144 */
1145
; Force a line boundary here
		.align	5
		.globl	EXT(cacheInit)

LEXT(cacheInit)

;	Entry state gathered here (see header comment above for the contract):
;	r12 = per_proc_info, r9 = HID0 at entry, r11 = feature flags,
;	r7 = entry MSR (FP/VEC forced off), r5 = MSR with DR/IR/EE off.

		mfsprg	r12,0				; Get the per_proc_info
		mfspr	r9,hid0				; Get the current power-saving mode

		mfsprg	r11,2				; Get CPU specific features
		mfmsr	r7				; Get the current MSR
		rlwinm	r7,r7,0,MSR_FP_BIT+1,MSR_FP_BIT-1	; Force floating point off
		rlwinm	r7,r7,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1	; Force vectors off
		rlwimi	r11,r11,pfLClckb+1,31,31	; Move pfLClck to another position (to keep from using non-volatile CRs)
		rlwinm	r5,r7,0,MSR_DR_BIT+1,MSR_IR_BIT-1	; Turn off translation
		rlwinm	r5,r5,0,MSR_EE_BIT+1,MSR_EE_BIT-1	; Turn off interruptions
		mtcrf	0x87,r11			; Get the feature flags

		lis	r10,hi16(dozem|napm|sleepm|dpmm)	; Mask of power management bits
		bf--	pf64Bitb,cIniNSF1		; Skip if 32-bit...

		sldi	r10,r10,32			; Position the masks

cIniNSF1:	andc	r4,r9,r10			; Clean up the old power bits
		mtspr	hid0,r4				; Set up the HID
		mfspr	r4,hid0				; Read HID0 back repeatedly; some cores need
		mfspr	r4,hid0				;   several back-to-back reads after an HID0
		mfspr	r4,hid0				;   write before the change takes effect
		mfspr	r4,hid0
		mfspr	r4,hid0
		mfspr	r4,hid0

		bt	pfNoMSRirb,ciNoMSR		; No direct MSR access...

		mtmsr	r5				; Translation and all off
		isync					; Toss prefetch
		b	ciNoMSRx

ciNoMSR:
		li	r0,loadMSR			; Get the MSR setter SC
		mr	r3,r5				; Get new MSR
		sc					; Set it
ciNoMSRx:

		bf	pfAltivecb,cinoDSS		; No Altivec here...

		dssall					; Stop streams
		sync

;	Purge the TLB under the global TLBIE lock: 128 entries on 32-bit,
;	1024 on 64-bit parts.

cinoDSS:	li	r5,tlbieLock			; Get the TLBIE lock
		li	r0,128				; Get number of TLB entries

		li	r6,0				; Start at 0
		bf--	pf64Bitb,citlbhang		; Skip if 32-bit...
		li	r0,1024				; Get the number of TLB entries

citlbhang:	lwarx	r2,0,r5				; Get the TLBIE lock
		mr.	r2,r2				; Is it locked?
		bne-	citlbhang			; It is locked, go wait...
		stwcx.	r0,0,r5				; Try to get it
		bne-	citlbhang			; We was beat...

		mtctr	r0				; Set the CTR

cipurgeTLB:	tlbie	r6				; Purge this entry
		addi	r6,r6,4096			; Next page
		bdnz	cipurgeTLB			; Do them all...

		mtcrf	0x80,r11			; Set SMP capability
		sync					; Make sure all TLB purges are done
		eieio					; Order, order in the court

		bf	pfSMPcapb,cinoSMP		; SMP incapable...

		tlbsync					; Sync all TLBs
		sync
		isync

		bf--	pf64Bitb,cinoSMP		; Skip if 32-bit...
		ptesync					; Wait for quiet again
		sync

cinoSMP:	stw	r2,tlbieLock(0)			; Unlock TLBIE lock (r2 is 0 from the lock probe)

		bt++	pf64Bitb,cin64			; Skip to the 64-bit path if 64-bit...
;
;	32-bit path: flush and invalidate the L1 caches.
;	r9 = HID0 at entry, r11 = feature flags (bit 31 = relocated pfLClck),
;	r12 = per_proc_info.
;

		rlwinm.	r0,r9,0,ice,dce			; Were either of the level 1s on?
		beq-	cinoL1				; No, no need to flush...

		rlwinm.	r0,r11,0,pfL1fab,pfL1fab	; do we have L1 flush assist?
		beq	ciswdl1				; If no hw flush assist, go do by software...

		mfspr	r8,msscr0			; Get the memory system control register
		oris	r8,r8,hi16(dl1hwfm)		; Turn on the hardware flush request

		mtspr	msscr0,r8			; Start the flush operation

ciwdl1f:	mfspr	r8,msscr0			; Get the control register again

		rlwinm.	r8,r8,0,dl1hwf,dl1hwf		; Has the flush request been reset yet?
		bne	ciwdl1f				; No, flush is still in progress...
		b	ciinvdl1			; Go invalidate l1...

;
;	We need to either make this very complicated or to use ROM for
;	the flush. The problem is that if during the following sequence a
;	snoop occurs that invalidates one of the lines in the cache, the
;	PLRU sequence will be altered making it possible to miss lines
;	during the flush. So, we either need to dedicate an area of RAM
;	to each processor, lock use of a RAM area, or use ROM. ROM is
;	by far the easiest. Note that this is not an issue for machines
;	that have hardware flush assists.
;

ciswdl1:	lwz	r0,pfl1dSize(r12)		; Get the level 1 cache size

		bf	31,cisnlck			; Skip if pfLClck not set...

		mfspr	r4,msscr0			; Save MSSCR0 (restored below)
		rlwinm	r6,r4,0,0,l2pfes-1		; Disable L2 prefetch (same mask as the nap path)
		mtspr	msscr0,r6			; Set it
		sync
		isync

		mfspr	r8,ldstcr			; Save the LDSTCR
		li	r2,1				; Get a mask of 0x01
		lis	r3,0xFFF0			; Point to ROM
		rlwinm	r11,r0,29,3,31			; Get the amount of memory to handle all indexes

		li	r6,0				; Start here

cisiniflsh:	dcbf	r6,r3				; Flush each line of the range we use
		addi	r6,r6,32			; Bump to the next
		cmplw	r6,r0				; Have we reached the end?
		blt+	cisiniflsh			; Nope, continue initial flush...

		sync					; Make sure it is done

		addi	r11,r11,-1			; Get mask for index wrap
		li	r6,0				; Get starting offset

cislckit:	not	r5,r2				; Lock all but 1 way
		rlwimi	r5,r8,0,0,23			; Build LDSTCR
		mtspr	ldstcr,r5			; Lock a way
		sync					; Clear out memory accesses
		isync					; Wait for all

cistouch:	lwzx	r10,r3,r6			; Pick up some trash
		addi	r6,r6,32			; Go to the next index
		and.	r0,r6,r11			; See if we are about to do next index
		bne+	cistouch			; Nope, do more...

		sync					; Make sure it is all done
		isync

		sub	r6,r6,r11			; Back up to start + 1
		addi	r6,r6,-1			; Get it right

cisflush:	dcbf	r3,r6				; Flush everything out
		addi	r6,r6,32			; Go to the next index
		and.	r0,r6,r11			; See if we are about to do next index
		bne+	cisflush			; Nope, do more...

		sync					; Make sure it is all done
		isync

		rlwinm.	r2,r2,1,24,31			; Shift to next way
		bne+	cislckit			; Do this for all ways...

		mtspr	ldstcr,r8			; Slam back to original
		sync
		isync

		mtspr	msscr0,r4			; Restore the MSSCR0 saved above
		sync
		isync

		b	cinoL1				; Go on to level 2...

;	No way-locking available: read 3/2 of the cache size from ROM to
;	displace every line, then invalidate.

cisnlck:	rlwinm	r2,r0,0,1,30			; Double cache size
		add	r0,r0,r2			; Get 3 times cache size
		rlwinm	r0,r0,26,6,31			; Get 3/2 number of cache lines
		lis	r3,0xFFF0			; Dead recon ROM address for now
		mtctr	r0				; Number of lines to flush

ciswfldl1a:	lwz	r2,0(r3)			; Flush anything else
		addi	r3,r3,32			; Next line
		bdnz	ciswfldl1a			; Flush the lot...

ciinvdl1:	sync					; Make sure all flushes have been committed

		mfspr	r8,hid0				; Get the HID0 bits
		rlwinm	r8,r8,0,dce+1,ice-1		; Clear cache enables
		mtspr	hid0,r8				; and turn off L1 cache
		sync					; Make sure all is done
		isync

		ori	r8,r8,lo16(icem|dcem|icfim|dcfim)	; Set the HID0 bits for enable, and invalidate
		sync
		isync

		mtspr	hid0,r8				; Start the invalidate and turn on cache
		rlwinm	r8,r8,0,dcfi+1,icfi-1		; Turn off the invalidate bits
		mtspr	hid0,r8				; Turn off the invalidate (needed for some older machines)
		sync

cinoL1:
;
;	Flush and disable the level 2
;
		mfsprg	r10,2				; need to check 2 features we did not put in CR
		rlwinm.	r0,r10,0,pfL2b,pfL2b		; do we have L2?
		beq	cinol2				; No level 2 cache to flush

		mfspr	r8,l2cr				; Get the L2CR
		lwz	r3,pfl2cr(r12)			; Get the saved L2CR value
		rlwinm.	r0,r8,0,l2e,l2e			; Was the L2 enabled?
		bne	ciflushl2			; Yes, force flush
		cmplwi	r8, 0				; Was the L2 all the way off?
		beq	ciinvdl2			; Yes, force invalidate
		lis	r0,hi16(l2sizm|l2clkm|l2ramm|l2ohm)	; Get configuration bits
		xor	r2,r8,r3			; Get changing bits?
		ori	r0,r0,lo16(l2slm|l2dfm|l2bypm)	; More config bits
		and.	r0,r0,r2			; Did any change?
		bne-	ciinvdl2			; Yes, just invalidate and get PLL synced...

ciflushl2:
		rlwinm.	r0,r10,0,pfL2fab,pfL2fab	; hardware-assisted L2 flush?
		beq	ciswfl2				; Flush not in hardware...

		mr	r10,r8				; Take a copy now

		bf	31,cinol2lck			; Skip if pfLClck not set...

		oris	r10,r10,hi16(l2ionlym|l2donlym)	; Set both instruction- and data-only
		sync
		mtspr	l2cr,r10			; Lock out the cache
		sync
		isync

cinol2lck:	ori	r10,r10,lo16(l2hwfm)		; Request flush
		sync					; Make sure everything is done

		mtspr	l2cr,r10			; Request flush

cihwfl2:	mfspr	r10,l2cr			; Get back the L2CR
		rlwinm.	r10,r10,0,l2hwf,l2hwf		; Is the flush over?
		bne+	cihwfl2				; Nope, keep going...
		b	ciinvdl2			; Flush done, go invalidate L2...

;	Software L2 flush: put the L2 in data-only mode, then read enough
;	ROM lines to displace the whole cache.

ciswfl2:
		lwz	r0,pfl2Size(r12)		; Get the L2 size
		oris	r2,r8,hi16(l2dom)		; Set L2 to data only mode

		b	ciswfl2doa			; Branch to next line...

		.align	5
ciswfl2doc:
		mtspr	l2cr,r2				; Enter data-only mode
		sync
		isync
		b	ciswfl2dod			; It is set, go displace it...

ciswfl2doa:
		b	ciswfl2dob			; Branch to next...

ciswfl2dob:
		sync					; Finish memory stuff
		isync					; Stop speculation
		b	ciswfl2doc			; Jump back up and turn on data only...
ciswfl2dod:
		rlwinm	r0,r0,27,5,31			; Get the number of lines
		lis	r10,0xFFF0			; Dead recon ROM for now
		mtctr	r0				; Set the number of lines

ciswfldl2a:	lwz	r0,0(r10)			; Load something to flush something
		addi	r10,r10,32			; Next line
		bdnz	ciswfldl2a			; Do the lot...

ciinvdl2:	rlwinm	r8,r3,0,l2e+1,31		; Clear the enable bit
		b	cinla				; Branch to next line...

		.align	5
cinlc:		mtspr	l2cr,r8				; Disable L2
		sync
		isync
		b	ciinvl2				; It is off, go invalidate it...

cinla:		b	cinlb				; Branch to next...

cinlb:		sync					; Finish memory stuff
		isync					; Stop speculation
		b	cinlc				; Jump back up and turn off cache...

ciinvl2:	sync
		isync

		cmplwi	r3, 0				; Should the L2 be all the way off?
		beq	cinol2				; Yes, done with L2

		oris	r2,r8,hi16(l2im)		; Get the invalidate flag set

		mtspr	l2cr,r2				; Start the invalidate
		sync
		isync
ciinvdl2a:	mfspr	r2,l2cr				; Get the L2CR
		mfsprg	r0,2				; need to check a feature in "non-volatile" set
		rlwinm.	r0,r0,0,pfL2ib,pfL2ib		; invalidate-done reported in hardware?
		beq	ciinvdl2b			; No, poll the l2ip bit instead...
		rlwinm.	r2,r2,0,l2i,l2i			; Is the invalidate still going?
		bne+	ciinvdl2a			; Assume so, this will take a looong time...
		sync
		b	cinol2				; Done with the L2...
ciinvdl2b:
		rlwinm.	r2,r2,0,l2ip,l2ip		; Is the invalidate still going?
		bne+	ciinvdl2a			; Assume so, this will take a looong time...
		sync
		mtspr	l2cr,r8				; Turn off the invalidate request

cinol2:

;
;	Flush and disable the level 3
;
		bf	pfL3b,cinol3			; No level 3 cache to flush

		mfspr	r8,l3cr				; Get the L3CR
		lwz	r3,pfl3cr(r12)			; Get the saved L3CR value
		rlwinm.	r0,r8,0,l3e,l3e			; Was the L3 enabled?
		bne	ciflushl3			; Yes, force flush
		cmplwi	r8, 0				; Was the L3 all the way off?
		beq	ciinvdl3			; Yes, force invalidate
		lis	r0,hi16(l3pem|l3sizm|l3dxm|l3clkm|l3spom|l3ckspm)	; Get configuration bits
		xor	r2,r8,r3			; Get changing bits?
		ori	r0,r0,lo16(l3pspm|l3repm|l3rtm|l3cyam|l3dmemm|l3dmsizm)	; More config bits
		and.	r0,r0,r2			; Did any change?
		bne-	ciinvdl3			; Yes, just invalidate and get PLL synced...

ciflushl3:
		sync					; 7450 book says do this even though not needed
		mr	r10,r8				; Take a copy now

		bf	31,cinol3lck			; Skip if pfL23lck not set...

		oris	r10,r10,hi16(l3iom)		; Set instruction-only
		ori	r10,r10,lo16(l3donlym)		; Set data-only
		sync
		mtspr	l3cr,r10			; Lock out the cache
		sync
		isync

cinol3lck:	ori	r10,r10,lo16(l3hwfm)		; Request flush
		sync					; Make sure everything is done

		mtspr	l3cr,r10			; Request flush

cihwfl3:	mfspr	r10,l3cr			; Get back the L3CR
		rlwinm.	r10,r10,0,l3hwf,l3hwf		; Is the flush over?
		bne+	cihwfl3				; Nope, keep going...

ciinvdl3:	rlwinm	r8,r3,0,l3e+1,31		; Clear the enable bit
		sync					; Make sure of life, liberty, and justice
		mtspr	l3cr,r8				; Disable L3
		sync

		cmplwi	r3, 0				; Should the L3 be all the way off?
		beq	cinol3				; Yes, done with L3

		ori	r8,r8,lo16(l3im)		; Get the invalidate flag set

		mtspr	l3cr,r8				; Start the invalidate

ciinvdl3b:	mfspr	r8,l3cr				; Get the L3CR
		rlwinm.	r8,r8,0,l3i,l3i			; Is the invalidate still going?
		bne+	ciinvdl3b			; Assume so...
		sync

;	NOTE(review): the following l3pdet rebuild sequence was inherited with no
;	documentation ("?" comments in the original); the exact field semantics of
;	l3pdet are not established by this file -- confirm against the 7450 manual
;	before changing anything here.
		lwz	r10, pfBootConfig(r12)		; Fetch boot-config word
		rlwinm.	r10, r10, 24, 28, 31		; Extract the (4-bit) sample-point field; zero means skip
		beq	ciinvdl3nopdet			; No sample point configured...

		mfspr	r8,l3pdet			; Read current L3 phase-detect value
		srw	r2, r8, r10			; Shift down by the configured amount
		rlwimi	r2, r8, 0, 24, 31		; Keep the low byte from the original
		subfic	r10, r10, 32			; Compute the complementary shift
		li	r8, -1				; All ones
		ori	r2, r2, 0x0080			; Force this bit on
		slw	r8, r8, r10			; Build the high-order mask
		or	r8, r2, r8			; Merge mask and shifted value
		mtspr	l3pdet, r8			; Write the rebuilt l3pdet
		isync

ciinvdl3nopdet:
		mfspr	r8,l3cr				; Get the L3CR
		rlwinm	r8,r8,0,l3clken+1,l3clken-1	; Clear the clock enable bit
		mtspr	l3cr,r8				; Disable the clock

		li	r2,128				; Short delay while the L3 clock is stopped
ciinvdl3c:	addi	r2,r2,-1			; Count down
		cmplwi	r2,0				; Done yet?
		bne+	ciinvdl3c

		mfspr	r10,msssr0			; Clear the L3 tag error bit in MSSSR0
		rlwinm	r10,r10,0,vgL3TAG+1,vgL3TAG-1
		mtspr	msssr0,r10
		sync

		mtspr	l3cr,r3				; Enable it as desired
		sync
cinol3:
;
;	Re-enable the L2 (if present) with its saved configuration, then
;	invalidate and re-enable the L1 caches.
;	r9 = HID0 captured at entry, r7 = entry MSR, r12 = per_proc_info.
;
;	Fix: the "done with L2" branch used ':' instead of the ';' comment
;	delimiter, so the trailing text was fed to the assembler as tokens
;	rather than being ignored as a comment.
;
		mfsprg	r0,2				; need to check a feature in "non-volatile" set
		rlwinm.	r0,r0,0,pfL2b,pfL2b		; is there an L2 cache?
		beq	cinol2a				; No level 2 cache to enable

		lwz	r3,pfl2cr(r12)			; Get the L2CR value
		cmplwi	r3, 0				; Should the L2 be all the way off?
		beq	cinol2a				; Yes, done with L2
		mtspr	l2cr,r3				; Enable it as desired
		sync

;
;	Invalidate and turn on L1s
;

cinol2a:
		bt	31,cinoexit			; Skip if pfLClck set...

		rlwinm	r8,r9,0,dce+1,ice-1		; Clear the I- and D- cache enables
		mtspr	hid0,r8				; Turn off dem caches
		sync

		ori	r8,r9,lo16(icem|dcem|icfim|dcfim)	; Set the HID0 bits for enable, and invalidate
		rlwinm	r9,r8,0,dcfi+1,icfi-1		; Turn off the invalidate bits
		sync
		isync

		mtspr	hid0,r8				; Start the invalidate and turn on L1 cache

cinoexit:	mtspr	hid0,r9				; Turn off the invalidate (needed for some older machines) and restore entry conditions
		sync
		mtmsr	r7				; Restore MSR to entry
		isync
		blr					; Return...
1592
1593
55e303ae
A
;
;	Handle 64-bit architecture
;	This processor cannot run without caches, so we just push everything out
;	and flush. It will be relatively clean afterwards
;
1599
		.align	5

;	64-bit (970-class) cache initialization: disable prefetch, flash-invalidate
;	the L1 D$, walk the L2 through the GUS direct-map window, then icbi the I$.
;	r7 = entry MSR, r9 = entry HID0 (restored at the end).

cin64:
		mfspr	r10,hid1			; Save hid1
		mfspr	r4,hid4				; Save hid4
		mr	r12,r10				; Really save hid1
		mr	r11,r4				; Get a working copy of hid4

		li	r0,0				; Get a 0
		eqv	r2,r2,r2			; Get all foxes

		rldimi	r10,r0,55,7			; Clear I$ prefetch bits (7:8)

		isync
		mtspr	hid1,r10			; Stick it
		mtspr	hid1,r10			; Stick it again (HID1 requires a double write)
		isync

		rldimi	r11,r2,38,25			; Disable D$ prefetch (25:25)

		sync
		mtspr	hid4,r11			; Stick it
		isync

		li	r3,8				; Set bit 28+32
		sldi	r3,r3,32			; Make it bit 28
		or	r3,r3,r11			; Turn on the flash invalidate L1D$

		oris	r5,r11,0x0600			; Set disable L1D$ bits
		sync
		mtspr	hid4,r3				; Invalidate
		isync

		mtspr	hid4,r5				; Un-invalidate and disable L1D$
		isync

		lis	r8,GUSModeReg			; Get the GUS mode ring address
		mfsprg	r0,2				; Get the feature flags
		ori	r8,r8,0x8000			; Set to read data
		rlwinm.	r0,r0,pfSCOMFixUpb+1,31,31	; Set shift if we need a fix me up

		sync

		mtspr	scomc,r8			; Request the GUS mode
		mfspr	r11,scomd			; Get the GUS mode
		mfspr	r8,scomc			; Get back the status (we just ignore it)
		sync
		isync

		sld	r11,r11,r0			; Fix up if needed

		ori	r6,r11,lo16(GUSMdmapen)		; Set the bit that means direct L2 cache address
		lis	r8,GUSModeReg			; Get GUS mode register address

		sync

		mtspr	scomd,r6			; Set that we want direct L2 mode
		mtspr	scomc,r8			; Tell GUS we want direct L2 mode
		mfspr	r3,scomc			; Get back the status
		sync
		isync

		li	r3,0				; Clear start point

cflushlp:	lis	r6,0x0040			; Pick 4MB line as our target
		or	r6,r6,r3			; Put in the line offset
		lwz	r5,0(r6)			; Load a line
		addis	r6,r6,8				; Roll bit 42:44
		lwz	r5,0(r6)			; Load a line
		addis	r6,r6,8				; Roll bit 42:44
		lwz	r5,0(r6)			; Load a line
		addis	r6,r6,8				; Roll bit 42:44
		lwz	r5,0(r6)			; Load a line
		addis	r6,r6,8				; Roll bit 42:44
		lwz	r5,0(r6)			; Load a line
		addis	r6,r6,8				; Roll bit 42:44
		lwz	r5,0(r6)			; Load a line
		addis	r6,r6,8				; Roll bit 42:44
		lwz	r5,0(r6)			; Load a line
		addis	r6,r6,8				; Roll bit 42:44
		lwz	r5,0(r6)			; Load a line

		addi	r3,r3,128			; Next line
		andis.	r5,r3,8				; Have we done enough?
		beq++	cflushlp			; Not yet...

		sync

		lis	r6,0x0040			; Pick 4MB line as our target

cflushx:	dcbf	0,r6				; Flush line and invalidate
		addi	r6,r6,128			; Next line
		andis.	r5,r6,0x0080			; Have we done enough?
		beq++	cflushx				; Keep on flushing...

		mr	r3,r10				; Copy current hid1
		rldimi	r3,r2,54,9			; Set force icbi match mode

		li	r6,0				; Set start of ICBI range
		isync
		mtspr	hid1,r3				; Stick it
		mtspr	hid1,r3				; Stick it again
		isync

cflicbi:	icbi	0,r6				; Kill I$
		addi	r6,r6,128			; Next line
		andis.	r5,r6,1				; Have we done them all?
		beq++	cflicbi				; Not yet...

		lis	r8,GUSModeReg			; Get GUS mode register address

		sync

		mtspr	scomd,r11			; Set that we do not want direct mode
		mtspr	scomc,r8			; Tell GUS we do not want direct mode
		mfspr	r3,scomc			; Get back the status
		sync
		isync

		isync
		mtspr	hid0,r9				; Restore entry hid0
		mfspr	r9,hid0				; Read HID0 back repeatedly; required after
		mfspr	r9,hid0				;   an HID0 write on these cores before the
		mfspr	r9,hid0				;   change is guaranteed to take effect
		mfspr	r9,hid0
		mfspr	r9,hid0
		mfspr	r9,hid0
		isync

		isync
		mtspr	hid1,r12			; Restore entry hid1
		mtspr	hid1,r12			; Stick it again
		isync

		sync
		mtspr	hid4,r4				; Restore entry hid4
		isync

		sync
		mtmsr	r7				; Restore MSR to entry
		isync
		blr					; Return...
1742
1743
1744
1c79356b
A
1745/* Disables all caches
1746 *
1747 * void cacheDisable(void)
1748 *
1749 * Turns off all caches on the processor. They are not flushed.
1750 *
1751 */
1752
; Force a line boundary here
		.align	5
		.globl	EXT(cacheDisable)

LEXT(cacheDisable)

;	Turn off L1/L2/L3 without flushing (see header comment above).
;	On 64-bit parts this is a no-op: those processors cannot run
;	with caches disabled.

		mfsprg	r11,2				; Get CPU specific features
		mtcrf	0x83,r11			; Set feature flags

		bf	pfAltivecb,cdNoAlt		; No vectors...

		dssall					; Stop streams

cdNoAlt:	sync

		btlr	pf64Bitb			; No way to disable a 64-bit machine...

		mfspr	r5,hid0				; Get the hid
		rlwinm	r5,r5,0,dce+1,ice-1		; Clear the I- and D- cache enables
		mtspr	hid0,r5				; Turn off dem caches
		sync

		rlwinm.	r0,r11,0,pfL2b,pfL2b		; is there an L2?
		beq	cdNoL2				; Skip if no L2...

		mfspr	r5,l2cr				; Get the L2
		rlwinm	r5,r5,0,l2e+1,31		; Turn off enable bit

		b	cinlaa				; Branch to next line...

		.align	5
cinlcc:		mtspr	l2cr,r5				; Disable L2
		sync
		isync
		b	cdNoL2				; It is off, we are done...

cinlaa:		b	cinlbb				; Branch to next...

cinlbb:		sync					; Finish memory stuff
		isync					; Stop speculation
		b	cinlcc				; Jump back up and turn off cache...

cdNoL2:

		bf	pfL3b,cdNoL3			; Skip down if no L3...

		mfspr	r5,l3cr				; Get the L3
		rlwinm	r5,r5,0,l3e+1,31		; Turn off enable bit
		rlwinm	r5,r5,0,l3clken+1,l3clken-1	; Turn off cache clock enable bit
		mtspr	l3cr,r5				; Disable the caches
		sync

cdNoL3:
		blr					; Leave...
1807
1808
1809/* Initialize processor thermal monitoring
1810 * void ml_thrm_init(void)
1811 *
483a1d10 1812 * Obsolete, deprecated and will be removed.
1c79356b
A
1813 */
1814
; Force a line boundary here
		.align	5
		.globl	EXT(ml_thrm_init)

LEXT(ml_thrm_init)
		blr					; Obsolete no-op (see header comment); returns immediately
1821
1c79356b
A
1822/* Set thermal monitor bounds
1823 * void ml_thrm_set(unsigned int low, unsigned int high)
1824 *
483a1d10 1825 * Obsolete, deprecated and will be removed.
1c79356b
A
1826 */
1827
; Force a line boundary here
		.align	5
		.globl	EXT(ml_thrm_set)

LEXT(ml_thrm_set)
		blr					; Obsolete no-op (see header comment); ignores both arguments
1c79356b
A
1834
/* Read processor temperature
 * unsigned int ml_read_temp(void)
 *
 * Obsolete, deprecated and will be removed.
 */
1840
; Force a line boundary here
		.align	5
		.globl	EXT(ml_read_temp)

LEXT(ml_read_temp)
		li	r3,-1				; Facility is obsolete: always report -1
		blr
1c79356b
A
1848
1849/* Throttle processor speed up or down
1850 * unsigned int ml_throttle(unsigned int step)
1851 *
1852 * Returns old speed and sets new. Both step and return are values from 0 to
1853 * 255 that define number of throttle steps, 0 being off and "ictcfim" is max * 2.
1854 *
483a1d10 1855 * Obsolete, deprecated and will be removed.
1c79356b
A
1856 */
1857
; Force a line boundary here
		.align	5
		.globl	EXT(ml_throttle)

LEXT(ml_throttle)
		li	r3,0				; Facility is obsolete: always report previous speed 0
		blr
1c79356b
A
1865
1866/*
1867** ml_get_timebase()
1868**
1869** Entry - R3 contains pointer to 64 bit structure.
1870**
1871** Exit - 64 bit structure filled in.
1872**
1873*/
; Force a line boundary here
		.align	5
		.globl	EXT(ml_get_timebase)

LEXT(ml_get_timebase)

;	Read the 64-bit timebase coherently into *r3.
;	Sample the upper half before and after reading the lower half;
;	if the two upper samples differ, the low word carried between
;	the reads and the snapshot must be retaken.

1:		mftbu	r4				; upper 32 bits, first sample
		mftb	r5				; lower 32 bits
		mftbu	r6				; upper 32 bits, second sample
		cmpw	r6, r4				; did the upper half change?
		bne-	1b				; torn read -- try again

		stw	r5, 4(r3)			; store the low word
		stw	r4, 0(r3)			; store the high word

		blr
1c79356b 1891
55e303ae
A
1892/*
1893 * unsigned int cpu_number(void)
1894 *
1895 * Returns the current cpu number.
1896 */
1897
		.align	5
		.globl	EXT(cpu_number)

LEXT(cpu_number)
		mfsprg	r4,1				; Get the current activation (thread) from SPRG1
		lwz	r4,ACT_PER_PROC(r4)		; Get the per_proc block
		lhz	r3,PP_CPU_NUMBER(r4)		; Get CPU number (halfword) as the return value
		blr					; Return...
9bccf70c 1906
91447636
A
1907/*
1908 * processor_t current_processor(void)
1909 *
1910 * Returns the current processor.
1911 */
1912
1913 .align 5
1914 .globl EXT(current_processor)
1915
1916LEXT(current_processor)
1917 mfsprg r3,1 ; Get the current activation
1918 lwz r3,ACT_PER_PROC(r3) ; Get the per_proc block
1919 addi r3,r3,PP_PROCESSOR
1920 blr
1921
1922#if PROCESSOR_SIZE > PP_PROCESSOR_SIZE
1923#error processor overflows per_proc
1924#endif
d7e50217
A
1925
1926/*
91447636 1927 * ast_t *ast_pending(void)
55e303ae 1928 *
91447636
A
1929 * Returns the address of the pending AST mask for the current processor.
1930 */
1931
		.align	5
		.globl	EXT(ast_pending)

LEXT(ast_pending)
		mfsprg	r3,1				; Get the current activation (thread) from SPRG1
		lwz	r3,ACT_PER_PROC(r3)		; Get the per_proc block
		addi	r3,r3,PP_PENDING_AST		; Return address of the pending-AST mask field
		blr					; Return...
1940
1941/*
1942 * void machine_set_current_thread(thread_t)
1943 *
1944 * Set the current thread
d7e50217 1945 */
		.align	5
		.globl	EXT(machine_set_current_thread)

LEXT(machine_set_current_thread)

		mfsprg	r4,1				; Get SPRG1 (the previously active thread)
		lwz	r5,ACT_PER_PROC(r4)		; Get the PerProc from the previous active thread
		stw	r5,ACT_PER_PROC(r3)		; Propagate the PerProc into the new active thread
		mtsprg	1,r3				; Set SPRG1 to the new active thread
		blr					; Return...
1956
1957/*
55e303ae 1958 * thread_t current_thread(void)
91447636 1959 * thread_t current_act(void)
55e303ae
A
1960 *
1961 *
1962 * Return the current thread for outside components.
1963 */
		.align	5
		.globl	EXT(current_thread)
		.globl	EXT(current_act)

; current_thread and current_act are aliases: both return the active
; thread, which is kept in SPRG1 (see machine_set_current_thread).
LEXT(current_thread)
LEXT(current_act)

		mfsprg	r3,1				; Return the active thread from SPRG1
		blr
1973
; void clock_get_uptime(uint64_t *result)
; Store the 64-bit timebase at *r3.  The mftbu/mftb/mftbu sequence re-reads
; the upper half and loops if it ticked, so the two halves are consistent.
 1974 .align 5
 1975 .globl EXT(clock_get_uptime)
 1976LEXT(clock_get_uptime)
 19771: mftbu r9 ; Get upper timebase
 1978 mftb r0 ; Get lower timebase
 1979 mftbu r11 ; Get upper timebase again
 1980 cmpw r11,r9 ; Did the upper half tick between reads?
91447636 1981 bne-- 1b ; Yes, retry for a consistent pair...
55e303ae
A
 1982 stw r0,4(r3) ; Store the low word
 1983 stw r9,0(r3) ; Store the high word
 1984 blr ; Return...
1985
1986
; uint64_t mach_absolute_time(void)
; Return the 64-bit timebase in r3 (high) / r4 (low), using the same
; upper-read/lower-read/upper-reread loop to guard against a carry between halves.
 1987 .align 5
 1988 .globl EXT(mach_absolute_time)
 1989LEXT(mach_absolute_time)
 19901: mftbu r3 ; Get upper timebase
 1991 mftb r4 ; Get lower timebase
 1992 mftbu r0 ; Get upper timebase again
 1993 cmpw r0,r3 ; Did the upper half tick between reads?
91447636 1994 bne-- 1b ; Yes, retry for a consistent pair...
55e303ae 1995 blr
9bccf70c 1996
1c79356b
A
1997/*
 1998** ml_sense_nmi()
 1999**
 2000** Currently a no-op stub: returns immediately without sensing anything.
 2001*/
; Force a line boundry here
 2001; Force a line boundry here
 2002 .align 5
 2003 .globl EXT(ml_sense_nmi)
 2004
 2005LEXT(ml_sense_nmi)
 2006
 2007 blr ; Leave...
2008
d52fe63f 2009/*
91447636 2010** ml_set_processor_speed_powertune()
d52fe63f
A
 2011**
 2011** r3 selects PowerTune entry 0 or 1 from the per_proc.  Temporarily clears
 2011** the dnap (doze/nap) bit in HID0, writes the selected PowerTune value to
 2011** the PowerTune control register via the SCOM bus, then polls the status
 2011** register until the requested speed takes effect, and restores dnap.
 2012*/
; Force a line boundry here
 2013; Force a line boundry here
 2014 .align 5
5353443c 2015 .globl EXT(ml_set_processor_speed_powertune)
d52fe63f 2016
5353443c 2017LEXT(ml_set_processor_speed_powertune)
483a1d10
A
 2018 mflr r0 ; Save the link register
 2019 stwu r1, -(FM_ALIGN(4*4)+FM_SIZE)(r1) ; Make some space on the stack
 2020 stw r28, FM_ARG0+0x00(r1) ; Save a register
 2021 stw r29, FM_ARG0+0x04(r1) ; Save a register
 2022 stw r30, FM_ARG0+0x08(r1) ; Save a register
 2023 stw r31, FM_ARG0+0x0C(r1) ; Save a register
 2024 stw r0, (FM_ALIGN(4*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
9bccf70c 2025
91447636
A
 2026 mfsprg r31,1 ; Get the current activation
 2027 lwz r31,ACT_PER_PROC(r31) ; Get the per_proc block
483a1d10 2028
483a1d10
A
 2029 rlwinm r28, r3, 31-dnap, dnap, dnap ; Shift the 1 bit to the dnap+32 bit
 2030 rlwinm r3, r3, 2, 29, 29 ; Shift the 1 to a 4 and mask
 2031 addi r3, r3, pfPowerTune0 ; Add in the pfPowerTune0 offset
 2032 lwzx r29, r31, r3 ; Load the PowerTune number 0 or 1
 2033
 2034 sldi r28, r28, 32 ; Shift to the top half
 2035 ld r3, pfHID0(r31) ; Load the saved hid0 value
 2036 and r28, r28, r3 ; Save the dnap bit
 2037 lis r4, hi16(dnapm) ; Make a mask for the dnap bit
 2038 sldi r4, r4, 32 ; Shift to the top half
 2039 andc r3, r3, r4 ; Clear the dnap bit
 2040 or r28, r28, r3 ; Insert the dnap bit as needed for later
 2041
 2042 sync
 2043 mtspr hid0, r3 ; Turn off dnap in hid0
 2044 mfspr r3, hid0 ; Yes, this is silly, keep it here
 2045 mfspr r3, hid0 ; Yes, this is a duplicate, keep it here
 2046 mfspr r3, hid0 ; Yes, this is a duplicate, keep it here
 2047 mfspr r3, hid0 ; Yes, this is a duplicate, keep it here
 2048 mfspr r3, hid0 ; Yes, this is a duplicate, keep it here
 2049 mfspr r3, hid0 ; Yes, this is a duplicate, keep it here
 2050 isync ; Make sure it is set
 2051
 2052 lis r3, hi16(PowerTuneControlReg) ; Write zero to the PCR
 2053 ori r3, r3, lo16(PowerTuneControlReg) ; Finish building the SCOM register address
 2054 li r4, 0 ; Data high word = 0
 2055 li r5, 0 ; Data low word = 0
 2056 bl _ml_scom_write ; Clear the PowerTune control register
 2057
 2058 lis r3, hi16(PowerTuneControlReg) ; Write the PowerTune value to the PCR
 2059 ori r3, r3, lo16(PowerTuneControlReg) ; Finish building the SCOM register address
 2060 li r4, 0 ; Data high word = 0
 2061 mr r5, r29 ; Data low word = selected PowerTune value
 2062 bl _ml_scom_write ; Request the speed change
 2063
 2064 rlwinm r29, r29, 13-6, 6, 7 ; Move to PSR speed location and isolate the requested speed
 2065spsPowerTuneLoop:
 2066 lis r3, hi16(PowerTuneStatusReg) ; Read the status from the PSR
 2067 ori r3, r3, lo16(PowerTuneStatusReg) ; Finish building the SCOM register address
 2068 li r4, 0 ; No data buffer; result comes back in r5
 2069 bl _ml_scom_read ; Read the PowerTune status
 2070 srdi r5, r5, 32 ; Move the status into the low word
 2071 rlwinm r0, r5, 0, 6, 7 ; Isolate the current speed
 2072 rlwimi r0, r5, 0, 2, 2 ; Copy in the change in progress bit
 2073 cmpw r0, r29 ; Compare the requested and current speeds
 2074 beq spsPowerTuneDone ; Speed reached with no change pending...
 2075 rlwinm. r0, r5, 0, 3, 3 ; Check the done/abort indication bit
 2076 beq spsPowerTuneLoop ; Not set, keep polling...
 2077
 2078spsPowerTuneDone:
 2079 sync
 2080 mtspr hid0, r28 ; Turn on dnap in hid0 if needed
 2081 mfspr r28, hid0 ; Yes, this is silly, keep it here
 2082 mfspr r28, hid0 ; Yes, this is a duplicate, keep it here
 2083 mfspr r28, hid0 ; Yes, this is a duplicate, keep it here
 2084 mfspr r28, hid0 ; Yes, this is a duplicate, keep it here
 2085 mfspr r28, hid0 ; Yes, this is a duplicate, keep it here
 2086 mfspr r28, hid0 ; Yes, this is a duplicate, keep it here
 2087 isync ; Make sure it is set
 2088
483a1d10
A
 2089 lwz r0, (FM_ALIGN(4*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Get the return
 2090 lwz r28, FM_ARG0+0x00(r1) ; Restore a register
 2091 lwz r29, FM_ARG0+0x04(r1) ; Restore a register
 2092 lwz r30, FM_ARG0+0x08(r1) ; Restore a register
 2093 lwz r31, FM_ARG0+0x0C(r1) ; Restore a register
 2094 lwz r1, FM_BACKPTR(r1) ; Pop the stack
 2095 mtlr r0 ; Restore the return address
d52fe63f 2096 blr ; Return...
d12e1678 2097
5353443c
A
2098/*
 2099** ml_set_processor_speed_dpll()
 2100**
 2100** r3 = 0 selects high speed, nonzero selects low speed (drives the HID1
 2100** PLL-select bit).  BTIC is cleared in HID0 before dropping to low speed
 2100** and the saved HID0 is restored after returning to high speed.
 2101*/
; Force a line boundry here
 2102; Force a line boundry here
 2103 .align 5
 2104 .globl EXT(ml_set_processor_speed_dpll)
 2105
 2106LEXT(ml_set_processor_speed_dpll)
91447636
A
 2107 mfsprg r5,1 ; Get the current activation
 2108 lwz r5,ACT_PER_PROC(r5) ; Get the per_proc block
5353443c
A
 2109
 2110 cmplwi r3, 0 ; Turn off BTIC before low speed
 2111 beq spsDPLL1 ; Going to high speed, skip BTIC disable...
 2112 mfspr r4, hid0 ; Get the current hid0 value
 2113 rlwinm r4, r4, 0, btic+1, btic-1 ; Clear the BTIC bit
 2114 sync
 2115 mtspr hid0, r4 ; Set the new hid0 value
 2116 isync
 2117 sync
 2118
 2119spsDPLL1:
 2120 mfspr r4, hid1 ; Get the current PLL settings
 2121 rlwimi r4, r3, 31-hid1ps, hid1ps, hid1ps ; Copy the PLL Select bit
 2122 stw r4, pfHID1(r5) ; Save the new hid1 value
 2123 mtspr hid1, r4 ; Select desired PLL
 2124
 2125 cmplwi r3, 0 ; Restore BTIC after high speed
 2126 bne spsDPLL2 ; Went to low speed, leave BTIC off...
 2127 lwz r4, pfHID0(r5) ; Load the hid0 value
 2128 sync
 2129 mtspr hid0, r4 ; Set the hid0 value
 2130 isync
 2131 sync
 2132spsDPLL2:
 2133 blr
2134
2135
2136/*
3a60a9f5
A
 2137** ml_set_processor_speed_dfs(divideby)
 2138** divideby == 0 then divide by 1 (full speed)
 2139** divideby == 1 then divide by 2 (half speed)
 2140** divideby == 2 then divide by 4 (quarter speed)
 2141** divideby == 3 then divide by 4 (quarter speed) - preferred
5353443c
A
 2142**
 2142** Inserts the two divider bits into HID1 and records the new value in
 2142** the per_proc.  NOTE(review): this routine reads the per_proc from
 2142** SPRG0, unlike its siblings which go through SPRG1/ACT_PER_PROC --
 2142** presumably SPRG0 holds the per_proc directly here; confirm against
 2142** the SPRG usage conventions elsewhere in this file.
 2143*/
; Force a line boundry here
 2144; Force a line boundry here
 2145 .align 5
 2146 .globl EXT(ml_set_processor_speed_dfs)
 2147
 2148LEXT(ml_set_processor_speed_dfs)
5353443c 2149
3a60a9f5
A
 2150 mfspr r4,hid1 ; Get the current HID1
 2151 mfsprg r5,0 ; Get the per_proc_info
 2152 rlwimi r4,r3,31-hid1dfs1,hid1dfs0,hid1dfs1 ; Stick the new divider bits in
 2153 stw r4,pfHID1(r5) ; Save the new hid1 value
5353443c 2154 sync
3a60a9f5 2155 mtspr hid1,r4 ; Set the new HID1
5353443c
A
 2156 sync
 2157 isync
 2158 blr
2159
2160
d12e1678
A
2161/*
 2162** ml_set_processor_voltage()
 2163**
 2163** r3 drives the HID2 voltage-mode (Vmin) bit.  Does nothing unless the
 2163** per_proc power-mode flags say DPLL Vmin is supported on this CPU.
 2164*/
; Force a line boundry here
 2165; Force a line boundry here
 2166 .align 5
 2167 .globl EXT(ml_set_processor_voltage)
 2168
 2169LEXT(ml_set_processor_voltage)
91447636
A
 2170 mfsprg r5,1 ; Get the current activation
 2171 lwz r5,ACT_PER_PROC(r5) ; Get the per_proc block
4a249263
A
 2172
 2173 lwz r6, pfPowerModes(r5) ; Get the supported power modes
 2174
 2175 rlwinm. r0, r6, 0, pmDPLLVminb, pmDPLLVminb ; Is DPLL Vmin supported
 2176 beq spvDone ; No, leave the hardware alone...
 2177
 2178 mfspr r4, hid2 ; Get HID2 value
 2179 rlwimi r4, r3, 31-hid2vmin, hid2vmin, hid2vmin ; Insert the voltage mode bit
 2180 mtspr hid2, r4 ; Set the voltage mode
 2181 sync ; Make sure it is done
 2182
 2183spvDone:
d12e1678 2184 blr
483a1d10
A
2185
2186
2187;
 2188; unsigned int ml_scom_write(unsigned int reg, unsigned long long data)
 2189; 64-bit machines only
 2190; returns status
 2190; r3 = SCOM register number, r4:r5 = 64-bit data; status returned in r3.
 2191;
 2192
 2193 .align 5
 2194 .globl EXT(ml_scom_write)
 2195
 2196LEXT(ml_scom_write)
 2197
 2198 rldicr r3,r3,8,47 ; Align register it correctly
 2199 rldimi r5,r4,32,0 ; Merge the high part of data
 2200 sync ; Clean up everything
 2201
 2202 mtspr scomd,r5 ; Stick in the data
 2203 mtspr scomc,r3 ; Set write to register
 2204 sync
 2205 isync
 2206
 2207 mfspr r3,scomc ; Read back status
 2208 blr ; leave....
2209
2210;
 2211; unsigned int ml_scom_read(unsigned int reg, unsigned long long *data)
 2212; 64-bit machines only
 2213; returns status
 2214; ASM Callers: data (r4) can be zero and the 64 bit data will be returned in r5
 2215;
 2216
 2217 .align 5
 2218 .globl EXT(ml_scom_read)
 2219
 2220LEXT(ml_scom_read)
 2221
 2222 mfsprg r0,2 ; Get the feature flags
 2223 rldicr r3,r3,8,47 ; Align register it correctly
 2224 rlwinm r0,r0,pfSCOMFixUpb+1,31,31 ; Set shift if we need a fix me up
 2225
 2226 ori r3,r3,0x8000 ; Set to read data
 2227 sync
 2228
 2229 mtspr scomc,r3 ; Request the register
 2230 mfspr r5,scomd ; Get the register contents
 2231 mfspr r3,scomc ; Get back the status
 2232 sync
 2233 isync
 2234
 2235 sld r5,r5,r0 ; Fix up if needed
 2236
 2237 cmplwi r4, 0 ; If data pointer is null, just return
 2238 beqlr ; the received data in r5
 2239 std r5,0(r4) ; Pass back the received data
 2240 blr ; Leave...
a3d08fcd
A
2241
2242;
 2243; Calculates the hdec to dec ratio
 2243; Busy-waits ~10000 timebase ticks with interrupts masked and returns in r3
 2243; how many hdec ticks elapsed over that interval.
 2244;
 2245
 2246 .align 5
 2247 .globl EXT(ml_hdec_ratio)
 2248
 2249LEXT(ml_hdec_ratio)
 2250
 2251 li r0,0 ; Clear the EE bit (and everything else for that matter)
 2252 mfmsr r11 ; Get the MSR
 2253 mtmsrd r0,1 ; Clear EE (L=1 form changes only EE/RI; we do not care about RI)
 2254 rlwinm r11,r11,0,MSR_EE_BIT,MSR_EE_BIT ; Isolate just the EE bit
 2255 mfmsr r10 ; Refresh our view of the MSR (VMX/FP may have changed)
 2256 or r12,r10,r11 ; Turn on EE if on before we turned it off
 2257
 2258 mftb r9 ; Get time now
 2259 mfspr r2,hdec ; Save hdec
 2260
 2261mhrcalc: mftb r8 ; Get time now
 2262 sub r8,r8,r9 ; How many ticks?
 2263 cmplwi r8,10000 ; 10000 yet?
 2264 blt mhrcalc ; Nope...
 2265
 2266 mfspr r9,hdec ; Get hdec now
 2267 sub r3,r2,r9 ; How many ticks?
 2268 mtmsrd r12,1 ; Flip EE on if needed
 2269 blr ; Leave...
3a60a9f5
A
2270
2271
2272;
 2273; int setPop(time)
 2274;
 2275; Calculates the number of ticks to the supplied event and
 2276; sets the decrementer. Never set the time for less than the
 2277; minimum, which is 10, nor more than maxDec, which is usually 0x7FFFFFFF
 2278; and never more than that but can be set by root.
 2278; r3:r4 = 64-bit target timebase value.  The whole computation is retried
 2278; from spOver if the upper timebase word ticked mid-calculation.
 2279;
 2280;
 2281
 2282 .align 7
 2283 .globl EXT(setPop)
 2284
 2285#define kMin 10
 2286
 2287LEXT(setPop)
 2288
 2289spOver: mftbu r8 ; Get upper time
 2290 addic r2,r4,-kMin ; Subtract minimum from target
 2291 mftb r9 ; Get lower
 2292 addme r11,r3 ; Do you have any bits I could borrow?
 2293 mftbu r10 ; Get upper again
 2294 subfe r0,r0,r0 ; Get -1 if we went negative 0 otherwise
 2295 subc r7,r2,r9 ; Subtract bottom and get carry
 2296 cmplw r8,r10 ; Did timebase upper tick?
 2297 subfe r6,r8,r11 ; Get the upper difference accounting for borrow
 2298 lwz r12,maxDec(0) ; Get the maximum decrementer size
 2299 addme r0,r0 ; Get -1 or -2 if anything negative, 0 otherwise
 2300 addic r2,r6,-1 ; Set carry if diff < 2**32
 2301 srawi r0,r0,1 ; Make all foxes (propagate sign to all bits)
 2302 subi r10,r12,kMin ; Adjust maximum for minimum adjust
 2303 andc r7,r7,r0 ; Pin time at 0 if under minimum
 2304 subfe r2,r2,r2 ; 0 if diff > 2**32, -1 otherwise
 2305 sub r7,r7,r10 ; Negative if duration is less than (max - min)
 2306 or r2,r2,r0 ; If the duration is negative, it isn't too big
 2307 srawi r0,r7,31 ; -1 if duration is too small
 2308 and r7,r7,r2 ; Clear duration if high part too big
 2309 and r7,r7,r0 ; Clear duration if low part too big
 2310 bne-- spOver ; Timer ticked mid-calc, redo the whole thing...
 2311 add r3,r7,r12 ; Add back the max for total
 2312 mtdec r3 ; Set the decrementer
 2313 blr ; Leave...
2314
2315