]> git.saurik.com Git - apple/xnu.git/blame - osfmk/ppc/machine_routines_asm.s
xnu-792.13.8.tar.gz
[apple/xnu.git] / osfmk / ppc / machine_routines_asm.s
CommitLineData
1c79356b 1/*
3a60a9f5 2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
1c79356b 3 *
8ad349bb 4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
1c79356b 5 *
8ad349bb
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
1c79356b
A
29 */
30#include <ppc/asm.h>
31#include <ppc/proc_reg.h>
1c79356b
A
32#include <assym.s>
33#include <debug.h>
34#include <mach/ppc/vm_param.h>
35#include <ppc/exception.h>
36
55e303ae
A
37
38/*
39 * ml_set_physical() -- turn off DR and (if 64-bit) turn SF on
40 * it is assumed that pf64Bit is already in cr6
41 * ml_set_physical_get_ffs() -- turn DR off, SF on, and get feature flags
42 * ml_set_physical_disabled() -- turn DR and EE off, SF on, get feature flags
43 * ml_set_translation_off() -- turn DR, IR, and EE off, SF on, get feature flags
44 *
45 * Callable only from assembler, these return:
46 * r2 -- new MSR
47 * r11 -- old MSR
48 * r10 -- feature flags (pf64Bit etc, ie SPRG 2)
49 * cr6 -- feature flags 24-27, ie pf64Bit, pf128Byte, and pf32Byte
50 *
51 * Uses r0 and r2. ml_set_translation_off also uses r3 and cr5.
52 */
53
;
; ml_set_translation_off -- turn DR, IR, and EE off; SF on for 64-bit parts.
; Assembler-callable only.  Returns r2 = new MSR, r11 = old MSR,
; r10 = feature flags (SPRG2), cr6 = pf64Bit etc, cr5 = pfNoMSRir etc.
; Uses r0, r2, r3.
;
        .align  4
        .globl  EXT(ml_set_translation_off)
LEXT(ml_set_translation_off)
        mfsprg  r10,2                       ; get feature flags
        li      r0,0                        ; Clear this
        mtcrf   0x02,r10                    ; move pf64Bit etc to cr6
        ori     r0,r0,lo16(MASK(MSR_EE)+MASK(MSR_FP)+MASK(MSR_IR)+MASK(MSR_DR))    ; turn off all 4
        mfmsr   r11                         ; get MSR
        oris    r0,r0,hi16(MASK(MSR_VEC))   ; Turn off vector too
        mtcrf   0x04,r10                    ; move pfNoMSRir etc to cr5
        andc    r2,r11,r0                   ; turn off EE, IR, and DR
        bt++    pf64Bitb,ml_set_physical_64 ; skip if 64-bit (only they take the hint)
        bf      pfNoMSRirb,ml_set_physical_32   ; skip if we can load MSR directly
        li      r0,loadMSR                  ; Get the MSR setter SC
        mr      r3,r2                       ; copy new MSR to r3 (loadMSR takes it there)
        sc                                  ; Set it
        blr
71
;
; ml_set_physical_disabled -- turn DR and EE off, SF on (64-bit parts).
; Assembler-callable only; falls into ml_set_physical_join.
; Returns r2 = new MSR, r11 = old MSR, r10 = feature flags, cr6 set.
;
        .align  4
        .globl  EXT(ml_set_physical_disabled)

LEXT(ml_set_physical_disabled)
        li      r0,0                        ; Clear
        mfsprg  r10,2                       ; get feature flags
        ori     r0,r0,lo16(MASK(MSR_EE))    ; turn EE off (FP and VEC are added to the mask at the join)
        mtcrf   0x02,r10                    ; move pf64Bit etc to cr6
        b       ml_set_physical_join
81
;
; ml_set_physical_get_ffs -- turn DR off, SF on, and fetch feature flags.
; ml_set_physical -- turn DR off, SF on; assumes pf64Bit already in cr6.
; Both leave EE alone; the common tail also serves ml_set_translation_off
; and ml_set_physical_disabled, which branch here with extra bits in r0.
; Returns r2 = new MSR, r11 = old MSR.
;
        .align  5
        .globl  EXT(ml_set_physical_get_ffs)

LEXT(ml_set_physical_get_ffs)
        mfsprg  r10,2                       ; get feature flags
        mtcrf   0x02,r10                    ; move pf64Bit etc to cr6

        .globl  EXT(ml_set_physical)
LEXT(ml_set_physical)

        li      r0,0                        ; do not turn off interrupts

ml_set_physical_join:
        oris    r0,r0,hi16(MASK(MSR_VEC))   ; Always gonna turn off vectors
        mfmsr   r11                         ; get MSR
        ori     r0,r0,lo16(MASK(MSR_DR)+MASK(MSR_FP))   ; always turn off DR and FP bit
        andc    r2,r11,r0                   ; turn off DR and maybe EE
        bt++    pf64Bitb,ml_set_physical_64 ; skip if 64-bit (only they take the hint)
ml_set_physical_32:
        mtmsr   r2                          ; turn off translation
        isync
        blr

ml_set_physical_64:
        li      r0,1                        ; get a 1 to slam into SF
        rldimi  r2,r0,63,MSR_SF_BIT         ; set SF bit (bit 0)
        mtmsrd  r2                          ; set 64-bit mode, turn off data relocation
        isync                               ; synchronize
        blr
111
112
113/*
114 * ml_restore(old_MSR)
115 *
116 * Callable only from assembler, restores the MSR in r11 saved by ml_set_physical.
117 * We assume cr6 and r11 are as set by ml_set_physical, ie:
118 * cr6 - pf64Bit flag (feature flags 24-27)
119 * r11 - old MSR
120 */
121
;
; ml_restore(old_MSR) -- undo ml_set_physical and friends.
; Assembler-callable only.  Assumes r11 = old MSR and cr6 = pf64Bit,
; exactly as the ml_set_physical family left them.
;
        .align  5
        .globl  EXT(ml_restore)

LEXT(ml_restore)
        bt++    pf64Bitb,ml_restore_64      ; handle 64-bit cpus (only they take the hint)
        mtmsr   r11                         ; restore a 32-bit MSR
        isync
        blr

ml_restore_64:
        mtmsrd  r11                         ; restore a 64-bit MSR
        isync
        blr
135
136
1c79356b
A
137/* PCI config cycle probing
138 *
139 * boolean_t ml_probe_read(vm_offset_t paddr, unsigned int *val)
140 *
141 * Read the memory location at physical address paddr.
142 * This is a part of a device probe, so there is a good chance we will
143 * have a machine check here. So we have to be able to handle that.
144 * We assume that machine checks are enabled both in MSR and HIDs
145 */
146
;
; boolean_t ml_probe_read(vm_offset_t paddr, unsigned int *val)
; Probe-read a word at physical paddr (PCI config probing); a machine
; check may occur and is expected to be survivable.  r3 = paddr,
; r4 = &val.  Returns r3 = 1 and *val set on success, r3 = 0 on failure.
; 64-bit machines branch off to mpr64bit below.
;
; Force a line boundary here
        .align  5
        .globl  EXT(ml_probe_read)

LEXT(ml_probe_read)

        mfsprg  r9,2                        ; Get feature flags

        rlwinm. r0,r9,0,pf64Bitb,pf64Bitb   ; Are we on a 64-bit machine?
        rlwinm  r3,r3,0,0,31                ; Clean up for 64-bit machines
        bne++   mpr64bit                    ; Go do this the 64-bit way...

mpr32bit:
        lis     r8,hi16(MASK(MSR_VEC))      ; Get the vector flag
        mfmsr   r0                          ; Save the current MSR
        ori     r8,r8,lo16(MASK(MSR_FP))    ; Add the FP flag

        neg     r10,r3                      ; Number of bytes to end of page
        andc    r0,r0,r8                    ; Clear VEC and FP
        rlwinm. r10,r10,0,20,31             ; Clear excess junk and test for page boundary
        ori     r8,r8,lo16(MASK(MSR_EE)|MASK(MSR_IR)|MASK(MSR_DR))  ; Drop EE, IR, and DR
        mr      r12,r3                      ; Save the load address
        andc    r2,r0,r8                    ; Clear VEC, FP, and EE

        mtcrf   0x04,r9                     ; Set the features
        cmplwi  cr1,r10,4                   ; At least 4 bytes left in page?

        beq-    mprdoit                     ; We are right on the boundary...
        li      r3,0                        ; Preload failure return value
        bltlr-  cr1                         ; No, just return failure...

mprdoit:

        bt      pfNoMSRirb,mprNoMSR         ; No direct MSR load on this part...

        mtmsr   r2                          ; Translation and all off
        isync                               ; Toss prefetch
        b       mprNoMSRx

mprNoMSR:
        mr      r5,r0                       ; Preserve saved MSR around the SC
        li      r0,loadMSR                  ; Get the MSR setter SC
        mr      r3,r2                       ; Get new MSR
        sc                                  ; Set it
        mr      r0,r5                       ; Recover saved MSR
        li      r3,0
mprNoMSRx:

        mfspr   r6, hid0                    ; Get a copy of hid0

        rlwinm. r5, r9, 0, pfNoMuMMCKb, pfNoMuMMCKb ; Check for NoMuMMCK
        bne     mprNoMuM

        rlwinm  r5, r6, 0, ice+1, ice-1     ; Turn off L1 I-Cache
        mtspr   hid0, r5
        isync                               ; Wait for I-Cache off
        rlwinm  r5, r6, 0, mum+1, mum-1     ; Turn off MuM w/ I-Cache on
        mtspr   hid0, r5
mprNoMuM:

;
;       We need to ensure that there is no more than 1 BAT register that
;       can get a hit.  There could be repercussions beyond the ken
;       of mortal man.  It is best not to tempt fate.
;

;       Note: we will reload these from the shadow BATs later

        li      r10,0                       ; Clear a register

        sync                                ; Make sure all is well

        mtdbatu 1,r10                       ; Invalidate DBAT 1
        mtdbatu 2,r10                       ; Invalidate DBAT 2
        mtdbatu 3,r10                       ; Invalidate DBAT 3

        rlwinm  r10,r12,0,0,14              ; Round down to a 128k boundary
        ori     r11,r10,0x32                ; Set uncached, coherent, R/W
        ori     r10,r10,2                   ; Make the upper half (128k, valid supervisor)
        mtdbatl 0,r11                       ; Set lower BAT first
        mtdbatu 0,r10                       ; Now the upper
        sync                                ; Just make sure

        dcbf    0,r12                       ; Make sure we kill the cache to avoid paradoxes
        sync

        ori     r11,r2,lo16(MASK(MSR_DR))   ; Turn on data translation
        mtmsr   r11                         ; Do it for real
        isync                               ; Make sure of it

        eieio                               ; Make sure of all previous accesses
        sync                                ; Make sure it is all caught up

        lwz     r11,0(r12)                  ; Get it and maybe machine check here

        eieio                               ; Make sure of ordering again
        sync                                ; Get caught up yet again
        isync                               ; Do not go further till we are here

        mtmsr   r2                          ; Turn translation back off
        isync

        lis     r10,hi16(EXT(shadow_BAT)+shdDBAT)       ; Get shadow address
        ori     r10,r10,lo16(EXT(shadow_BAT)+shdDBAT)   ; Get shadow address

        lwz     r5,0(r10)                   ; Pick up DBAT 0 high
        lwz     r6,4(r10)                   ; Pick up DBAT 0 low
        lwz     r7,8(r10)                   ; Pick up DBAT 1 high
        lwz     r8,16(r10)                  ; Pick up DBAT 2 high
        lwz     r9,24(r10)                  ; Pick up DBAT 3 high

        mtdbatu 0,r5                        ; Restore DBAT 0 high
        mtdbatl 0,r6                        ; Restore DBAT 0 low
        mtdbatu 1,r7                        ; Restore DBAT 1 high
        mtdbatu 2,r8                        ; Restore DBAT 2 high
        mtdbatu 3,r9                        ; Restore DBAT 3 high
        sync

        li      r3,1                        ; We made it

        mtmsr   r0                          ; Restore translation and exceptions
        isync                               ; Toss speculations

        stw     r11,0(r4)                   ; Save the loaded value
        blr                                 ; Return...

; Force a line boundary here.  This means we will be able to check addresses better
        .align  5
        .globl  EXT(ml_probe_read_mck)
LEXT(ml_probe_read_mck)
274
55e303ae
A
275
276/* PCI config cycle probing - 64-bit
277 *
278 * boolean_t ml_probe_read_64(addr64_t paddr, unsigned int *val)
279 *
280 * Read the memory location at physical address paddr.
281 * This is a part of a device probe, so there is a good chance we will
282 * have a machine check here. So we have to be able to handle that.
283 * We assume that machine checks are enabled both in MSR and HIDs
284 */
285
;
; boolean_t ml_probe_read_64(addr64_t paddr, unsigned int *val)
; 64-bit probe read: r3/r4 = high/low halves of paddr, r5 = &val.
; Runs the access in real mode with HID4 cache inhibit set; returns
; r3 = 1 and *val on success, r3 = 0 if paddr is not word-aligned.
;
; Force a line boundary here
        .align  6
        .globl  EXT(ml_probe_read_64)

LEXT(ml_probe_read_64)

        mfsprg  r9,2                        ; Get feature flags
        rlwinm  r3,r3,0,1,0                 ; Copy low 32 bits to top 32
        rlwinm. r0,r9,0,pf64Bitb,pf64Bitb   ; Are we on a 64-bit machine?
        rlwimi  r3,r4,0,0,31                ; Insert low part of 64-bit address in bottom 32 bits

        mr      r4,r5                       ; Move result pointer to common register
        beq--   mpr32bit                    ; Go do this the 32-bit way...

mpr64bit:
        andi.   r0,r3,3                     ; Check if we are on a word boundary
        li      r0,0                        ; Clear r0 (becomes the new EE state and mask seed)
        bne--   mprFail                     ; Boundary not good...
        mfmsr   r11                         ; Get the MSR
        mtmsrd  r0,1                        ; Clear EE (L=1 form alters only EE and RI; r0 is zero)
        rlwinm  r11,r11,0,MSR_EE_BIT,MSR_EE_BIT ; Isolate just the EE bit
        mfmsr   r10                         ; Refresh our view of the MSR (VMX/FP may have changed)
        or      r12,r10,r11                 ; Turn on EE if on before we turned it off
        ori     r0,r0,lo16(MASK(MSR_IR)|MASK(MSR_DR))   ; Get the IR and DR bits
        li      r2,1                        ; Get a 1
        sldi    r2,r2,63                    ; Get the 64-bit bit
        andc    r10,r10,r0                  ; Clear IR and DR
        or      r10,r10,r2                  ; Set 64-bit

        li      r0,1                        ; Get a 1
        mtmsrd  r10                         ; Translation and EE off, 64-bit on
        isync

        sldi    r0,r0,32+8                  ; Get the right bit to inhibit caching

        mfspr   r8,hid4                     ; Get HID4
        or      r2,r8,r0                    ; Set bit to make real accesses cache-inhibited
        sync                                ; Sync up
        mtspr   hid4,r2                     ; Make real accesses cache-inhibited
        isync                               ; Toss prefetches

        lis     r7,0xE000                   ; Get the unlikeliest ESID possible
        srdi    r7,r7,1                     ; Make 0x7FFFFFFFF0000000
        slbie   r7                          ; Make sure the ERAT is cleared

        sync
        isync

        eieio                               ; Make sure of all previous accesses

        lwz     r11,0(r3)                   ; Get it and maybe machine check here

        eieio                               ; Make sure of ordering again
        sync                                ; Get caught up yet again
        isync                               ; Do not go further till we are here

        sync                                ; Sync up
        mtspr   hid4,r8                     ; Make real accesses not cache-inhibited
        isync                               ; Toss prefetches

        lis     r7,0xE000                   ; Get the unlikeliest ESID possible
        srdi    r7,r7,1                     ; Make 0x7FFFFFFFF0000000
        slbie   r7                          ; Make sure the ERAT is cleared

        mtmsrd  r12                         ; Restore entry MSR
        isync

        stw     r11,0(r4)                   ; Pass back the result
        li      r3,1                        ; Indicate success
        blr                                 ; Leave...

mprFail:
        li      r3,0                        ; Set failure
        blr                                 ; Leave...

; Force a line boundary here.  This means we will be able to check addresses better
        .align  6
        .globl  EXT(ml_probe_read_mck_64)
LEXT(ml_probe_read_mck_64)
363
364
365/* Read physical address byte
1c79356b
A
366 *
367 * unsigned int ml_phys_read_byte(vm_offset_t paddr)
55e303ae 368 * unsigned int ml_phys_read_byte_64(addr64_t paddr)
1c79356b
A
369 *
370 * Read the byte at physical address paddr. Memory should not be cache inhibited.
371 */
372
;
; unsigned int ml_phys_read_byte(vm_offset_t paddr)
; unsigned int ml_phys_read_byte_64(addr64_t paddr)
; Read the byte at physical paddr (memory should not be cache inhibited).
; Both entries normalize the address into r3 and share rdwrpre/rdwrpost.
;
; Force a line boundary here

        .align  5
        .globl  EXT(ml_phys_read_byte_64)

LEXT(ml_phys_read_byte_64)

        rlwinm  r3,r3,0,1,0                 ; Copy low 32 bits to top 32
        rlwimi  r3,r4,0,0,31                ; Insert low part of 64-bit address in bottom 32 bits
        b       ml_phys_read_byte_join

        .globl  EXT(ml_phys_read_byte)

LEXT(ml_phys_read_byte)
        rlwinm  r3,r3,0,0,31                ; truncate address to 32-bits
ml_phys_read_byte_join:                     ; r3 = address to read (reg64_t)
        mflr    r11                         ; Save the return
        bl      rdwrpre                     ; Get set up, translation/interrupts off, 64-bit on, etc.

        lbz     r3,0(r3)                    ; Get the byte
        b       rdwrpost                    ; Clean up and leave...
d7e50217 394
d7e50217 395
55e303ae
A
396/* Read physical address half word
397 *
398 * unsigned int ml_phys_read_half(vm_offset_t paddr)
399 * unsigned int ml_phys_read_half_64(addr64_t paddr)
400 *
401 * Read the half word at physical address paddr. Memory should not be cache inhibited.
402 */
1c79356b 403
;
; unsigned int ml_phys_read_half(vm_offset_t paddr)
; unsigned int ml_phys_read_half_64(addr64_t paddr)
; Read the half word at physical paddr; shares rdwrpre/rdwrpost.
;
; Force a line boundary here

        .align  5
        .globl  EXT(ml_phys_read_half_64)

LEXT(ml_phys_read_half_64)

        rlwinm  r3,r3,0,1,0                 ; Copy low 32 bits to top 32
        rlwimi  r3,r4,0,0,31                ; Insert low part of 64-bit address in bottom 32 bits
        b       ml_phys_read_half_join

        .globl  EXT(ml_phys_read_half)

LEXT(ml_phys_read_half)
        rlwinm  r3,r3,0,0,31                ; truncate address to 32-bits
ml_phys_read_half_join:                     ; r3 = address to read (reg64_t)
        mflr    r11                         ; Save the return
        bl      rdwrpre                     ; Get set up, translation/interrupts off, 64-bit on, etc.

        lhz     r3,0(r3)                    ; Get the half word
        b       rdwrpost                    ; Clean up and leave...
425
426
427/* Read physical address word
1c79356b
A
428 *
429 * unsigned int ml_phys_read(vm_offset_t paddr)
55e303ae
A
430 * unsigned int ml_phys_read_64(addr64_t paddr)
431 * unsigned int ml_phys_read_word(vm_offset_t paddr)
432 * unsigned int ml_phys_read_word_64(addr64_t paddr)
1c79356b
A
433 *
434 * Read the word at physical address paddr. Memory should not be cache inhibited.
435 */
436
;
; unsigned int ml_phys_read(vm_offset_t paddr)        (alias: ml_phys_read_word)
; unsigned int ml_phys_read_64(addr64_t paddr)        (alias: ml_phys_read_word_64)
; Read the word at physical paddr; shares rdwrpre/rdwrpost.
;
; Force a line boundary here

        .align  5
        .globl  EXT(ml_phys_read_64)
        .globl  EXT(ml_phys_read_word_64)

LEXT(ml_phys_read_64)
LEXT(ml_phys_read_word_64)

        rlwinm  r3,r3,0,1,0                 ; Copy low 32 bits to top 32
        rlwimi  r3,r4,0,0,31                ; Insert low part of 64-bit address in bottom 32 bits
        b       ml_phys_read_word_join

        .globl  EXT(ml_phys_read)
        .globl  EXT(ml_phys_read_word)

LEXT(ml_phys_read)
LEXT(ml_phys_read_word)
        rlwinm  r3,r3,0,0,31                ; truncate address to 32-bits
ml_phys_read_word_join:                     ; r3 = address to read (reg64_t)
        mflr    r11                         ; Save the return
        bl      rdwrpre                     ; Get set up, translation/interrupts off, 64-bit on, etc.

        lwz     r3,0(r3)                    ; Get the word
        b       rdwrpost                    ; Clean up and leave...
d7e50217 462
d7e50217 463
55e303ae
A
464/* Read physical address double word
465 *
466 * unsigned long long ml_phys_read_double(vm_offset_t paddr)
467 * unsigned long long ml_phys_read_double_64(addr64_t paddr)
468 *
469 * Read the double word at physical address paddr. Memory should not be cache inhibited.
470 */
471
;
; unsigned long long ml_phys_read_double(vm_offset_t paddr)
; unsigned long long ml_phys_read_double_64(addr64_t paddr)
; Read the double word at physical paddr; result high word in r3,
; low word in r4.  Shares rdwrpre/rdwrpost.
;
; Force a line boundary here

        .align  5
        .globl  EXT(ml_phys_read_double_64)

LEXT(ml_phys_read_double_64)

        rlwinm  r3,r3,0,1,0                 ; Copy low 32 bits to top 32
        rlwimi  r3,r4,0,0,31                ; Insert low part of 64-bit address in bottom 32 bits
        b       ml_phys_read_double_join

        .globl  EXT(ml_phys_read_double)

LEXT(ml_phys_read_double)
        rlwinm  r3,r3,0,0,31                ; truncate address to 32-bits
ml_phys_read_double_join:                   ; r3 = address to read (reg64_t)
        mflr    r11                         ; Save the return
        bl      rdwrpre                     ; Get set up, translation/interrupts off, 64-bit on, etc.

        lwz     r4,4(r3)                    ; Get the low word
        lwz     r3,0(r3)                    ; Get the high word
        b       rdwrpost                    ; Clean up and leave...
1c79356b 494
1c79356b
A
495
496/* Write physical address byte
497 *
498 * void ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
55e303ae 499 * void ml_phys_write_byte_64(addr64_t paddr, unsigned int data)
1c79356b
A
500 *
501 * Write the byte at physical address paddr. Memory should not be cache inhibited.
502 */
503
;
; void ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
; void ml_phys_write_byte_64(addr64_t paddr, unsigned int data)
; Write the byte at physical paddr; shares rdwrpre/rdwrpost.
;
        .align  5
        .globl  EXT(ml_phys_write_byte_64)

LEXT(ml_phys_write_byte_64)

        rlwinm  r3,r3,0,1,0                 ; Copy low 32 bits to top 32
        rlwimi  r3,r4,0,0,31                ; Insert low part of 64-bit address in bottom 32 bits
        mr      r4,r5                       ; Copy over the data
        b       ml_phys_write_byte_join

        .globl  EXT(ml_phys_write_byte)

LEXT(ml_phys_write_byte)
        rlwinm  r3,r3,0,0,31                ; truncate address to 32-bits
ml_phys_write_byte_join:                    ; r3 = address to write (reg64_t), r4 = data
        mflr    r11                         ; Save the return
        bl      rdwrpre                     ; Get set up, translation/interrupts off, 64-bit on, etc.

        stb     r4,0(r3)                    ; Set the byte
        b       rdwrpost                    ; Clean up and leave...
1c79356b 524
d7e50217 525
55e303ae
A
526/* Write physical address half word
527 *
528 * void ml_phys_write_half(vm_offset_t paddr, unsigned int data)
529 * void ml_phys_write_half_64(addr64_t paddr, unsigned int data)
530 *
531 * Write the half word at physical address paddr. Memory should not be cache inhibited.
532 */
533
;
; void ml_phys_write_half(vm_offset_t paddr, unsigned int data)
; void ml_phys_write_half_64(addr64_t paddr, unsigned int data)
; Write the half word at physical paddr; shares rdwrpre/rdwrpost.
;
        .align  5
        .globl  EXT(ml_phys_write_half_64)

LEXT(ml_phys_write_half_64)

        rlwinm  r3,r3,0,1,0                 ; Copy low 32 bits to top 32
        rlwimi  r3,r4,0,0,31                ; Insert low part of 64-bit address in bottom 32 bits
        mr      r4,r5                       ; Copy over the data
        b       ml_phys_write_half_join

        .globl  EXT(ml_phys_write_half)

LEXT(ml_phys_write_half)
        rlwinm  r3,r3,0,0,31                ; truncate address to 32-bits
ml_phys_write_half_join:                    ; r3 = address to write (reg64_t), r4 = data
        mflr    r11                         ; Save the return
        bl      rdwrpre                     ; Get set up, translation/interrupts off, 64-bit on, etc.

        sth     r4,0(r3)                    ; Set the half word
        b       rdwrpost                    ; Clean up and leave...
1c79356b 554
1c79356b 555
55e303ae 556/* Write physical address word
1c79356b
A
557 *
558 * void ml_phys_write(vm_offset_t paddr, unsigned int data)
55e303ae
A
559 * void ml_phys_write_64(addr64_t paddr, unsigned int data)
560 * void ml_phys_write_word(vm_offset_t paddr, unsigned int data)
561 * void ml_phys_write_word_64(addr64_t paddr, unsigned int data)
1c79356b
A
562 *
563 * Write the word at physical address paddr. Memory should not be cache inhibited.
564 */
565
;
; void ml_phys_write(vm_offset_t paddr, unsigned int data)        (alias: ml_phys_write_word)
; void ml_phys_write_64(addr64_t paddr, unsigned int data)        (alias: ml_phys_write_word_64)
; Write the word at physical paddr; shares rdwrpre/rdwrpost.
;
        .align  5
        .globl  EXT(ml_phys_write_64)
        .globl  EXT(ml_phys_write_word_64)

LEXT(ml_phys_write_64)
LEXT(ml_phys_write_word_64)

        rlwinm  r3,r3,0,1,0                 ; Copy low 32 bits to top 32
        rlwimi  r3,r4,0,0,31                ; Insert low part of 64-bit address in bottom 32 bits
        mr      r4,r5                       ; Copy over the data
        b       ml_phys_write_word_join

        .globl  EXT(ml_phys_write)
        .globl  EXT(ml_phys_write_word)

LEXT(ml_phys_write)
LEXT(ml_phys_write_word)
        rlwinm  r3,r3,0,0,31                ; truncate address to 32-bits
ml_phys_write_word_join:                    ; r3 = address to write (reg64_t), r4 = data
        mflr    r11                         ; Save the return
        bl      rdwrpre                     ; Get set up, translation/interrupts off, 64-bit on, etc.

        stw     r4,0(r3)                    ; Set the word
        b       rdwrpost                    ; Clean up and leave...
d7e50217 590
d7e50217 591
55e303ae
A
592/* Write physical address double word
593 *
594 * void ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
595 * void ml_phys_write_double_64(addr64_t paddr, unsigned long long data)
596 *
597 * Write the double word at physical address paddr. Memory should not be cache inhibited.
598 */
599
;
; void ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
; void ml_phys_write_double_64(addr64_t paddr, unsigned long long data)
; Write the double word at physical paddr; shares rdwrpre/rdwrpost.
;
        .align  5
        .globl  EXT(ml_phys_write_double_64)

LEXT(ml_phys_write_double_64)

        rlwinm  r3,r3,0,1,0                 ; Copy low 32 bits to top 32
        rlwimi  r3,r4,0,0,31                ; Insert low part of 64-bit address in bottom 32 bits
        mr      r4,r5                       ; Copy over the high data
        mr      r5,r6                       ; Copy over the low data
        b       ml_phys_write_double_join

        .globl  EXT(ml_phys_write_double)

LEXT(ml_phys_write_double)
        rlwinm  r3,r3,0,0,31                ; truncate address to 32-bits
ml_phys_write_double_join:                  ; r3 = address to write (reg64_t), r4,r5 = data (long long)
        mflr    r11                         ; Save the return
        bl      rdwrpre                     ; Get set up, translation/interrupts off, 64-bit on, etc.

        stw     r4,0(r3)                    ; Set the high word
        stw     r5,4(r3)                    ; Set the low word
        b       rdwrpost                    ; Clean up and leave...
622
623
;
; rdwrpre / rdwrpost -- shared pre/postamble for the ml_phys_* accessors.
; In:  r3 = physical address, LR already saved in r11 by the caller.
; rdwrpre leaves: r10 = entry MSR (sans FP/VEC), r9 = access-mode MSR,
; cr6 = pf64Bit; on 64-bit parts cr7/r8 record whether HID4 cache
; inhibit was set (r8 = original HID4).  rdwrpost restores MSR (and
; HID4 if changed) and returns to the saved LR.  Uses r0, r2, r7, r12.
;
        .align  5

rdwrpre:
        mfsprg  r12,2                       ; Get feature flags
        lis     r8,hi16(MASK(MSR_VEC))      ; Get the vector flag
        mfmsr   r10                         ; Save the MSR
        ori     r8,r8,lo16(MASK(MSR_FP))    ; Add the FP flag
        mtcrf   0x02,r12                    ; move pf64Bit
        andc    r10,r10,r8                  ; Clear VEC and FP
        ori     r9,r8,lo16(MASK(MSR_EE)|MASK(MSR_IR)|MASK(MSR_DR))  ; Drop EE, DR, and IR
        li      r2,1                        ; Prepare for 64 bit
        andc    r9,r10,r9                   ; Clear VEC, FP, DR, and EE
        bf--    pf64Bitb,rdwrpre32          ; Join 32-bit code...

        srdi    r7,r3,31                    ; Get a 1 if address is in I/O memory
        rldimi  r9,r2,63,MSR_SF_BIT         ; set SF bit (bit 0)
        cmpldi  cr7,r7,1                    ; Is source in I/O memory?
        mtmsrd  r9                          ; set 64-bit mode, turn off EE, DR, and IR
        isync                               ; synchronize

        sldi    r0,r2,32+8                  ; Get the right bit to turn off caching

        bnelr++ cr7                         ; We are not in the I/O area, all ready...

        mfspr   r8,hid4                     ; Get HID4
        or      r2,r8,r0                    ; Set bit to make real accesses cache-inhibited
        sync                                ; Sync up
        mtspr   hid4,r2                     ; Make real accesses cache-inhibited
        isync                               ; Toss prefetches

        lis     r7,0xE000                   ; Get the unlikeliest ESID possible
        srdi    r7,r7,1                     ; Make 0x7FFFFFFFF0000000
        slbie   r7                          ; Make sure the ERAT is cleared

        sync
        isync
        blr                                 ; Finally, all ready...

        .align  5

rdwrpre32:
        rlwimi  r9,r10,0,MSR_IR_BIT,MSR_IR_BIT  ; Leave the IR bit unchanged
        mtmsr   r9                          ; Drop EE, DR, and leave IR unchanged
        isync
        blr                                 ; All set up, leave...

        .align  5

rdwrpost:
        mtlr    r11                         ; Restore the return
        bt++    pf64Bitb,rdwrpost64         ; Join 64-bit code...

        mtmsr   r10                         ; Restore entry MSR (sans FP and VEC)
        isync
        blr                                 ; Leave...

rdwrpost64:
        bne++   cr7,rdwrpcok                ; Skip enabling real mode caching if we did not change it...

        sync                                ; Sync up
        mtspr   hid4,r8                     ; Make real accesses not cache-inhibited
        isync                               ; Toss prefetches

        lis     r7,0xE000                   ; Get the unlikeliest ESID possible
        srdi    r7,r7,1                     ; Make 0x7FFFFFFFF0000000
        slbie   r7                          ; Make sure the ERAT is cleared

rdwrpcok:
        mtmsrd  r10                         ; Restore entry MSR (sans FP and VEC)
        isync
        blr                                 ; Leave...
1c79356b
A
690
691
692/* set interrupts enabled or disabled
693 *
694 * boolean_t set_interrupts_enabled(boolean_t enable)
695 *
696 * Set EE bit to "enable" and return old value as boolean
697 */
698
;
; boolean_t ml_set_interrupts_enabled(boolean_t enable)
; Set the MSR EE bit from 'enable'; returns the previous EE state.
; When enabling, checks for a pending urgent AST and preempts via the
; DoPreemptCall firmware call if preemption is not masked.
;
; Force a line boundary here
        .align  5
        .globl  EXT(ml_set_interrupts_enabled)

LEXT(ml_set_interrupts_enabled)

        andi.   r4,r3,1                     ; Are we turning interruptions on?
        lis     r0,hi16(MASK(MSR_VEC))      ; Get vector enable
        mfmsr   r5                          ; Get the current MSR
        ori     r0,r0,lo16(MASK(MSR_EE)|MASK(MSR_FP))   ; Get float enable and EE enable
        rlwinm  r3,r5,17,31,31              ; Set return value (old EE bit)
        andc    r5,r5,r0                    ; Force VEC and FP off
        bne     CheckPreemption             ; Interrupts going on, check ASTs...

        mtmsr   r5                          ; Slam disable (always going disabled here)
        isync                               ; Need this because FP/Vec might go off
        blr

        .align  5

CheckPreemption:
        mfsprg  r9,1                        ; Get current activation
        lwz     r7,ACT_PER_PROC(r9)         ; Get the per_proc block
        ori     r5,r5,lo16(MASK(MSR_EE))    ; Turn on the enable
        lwz     r8,PP_PENDING_AST(r7)       ; Get pending AST mask
        li      r6,AST_URGENT               ; Get the type we will preempt for
        lwz     r7,ACT_PREEMPT_CNT(r9)      ; Get preemption count
        lis     r0,hi16(DoPreemptCall)      ; High part of Preempt FW call
        cmpwi   cr1,r7,0                    ; Are preemptions masked off?
        and.    r8,r8,r6                    ; Are we urgent?
        crorc   cr1_eq,cr0_eq,cr1_eq        ; Remember if preemptions are masked or not urgent
        ori     r0,r0,lo16(DoPreemptCall)   ; Bottom of FW call

        mtmsr   r5                          ; Restore the MSR now, before we can preempt
        isync                               ; Need this because FP/Vec might go off

        beqlr++ cr1                         ; Return if no preemption...
        sc                                  ; Preempt
        blr
738
91447636
A
;
; timer_update -- store a 64-bit timer value (r4 = high, r5 = low) into
; the timer at r3.  HIGHCHK is written first and HIGH last, with eieio
; ordering, so a concurrent timer_grab (below) can detect a torn read
; by comparing HIGH against HIGHCHK.
;
; Force a line boundary here
        .align  5
        .globl  EXT(timer_update)

LEXT(timer_update)
        stw     r4,TIMER_HIGHCHK(r3)        ; Publish new high word to the check field first
        eieio                               ; Order the stores
        stw     r5,TIMER_LOW(r3)            ; Then the low word
        eieio                               ; Order the stores
        stw     r4,TIMER_HIGH(r3)           ; Finally the high word
        blr
de355530
A
750
;
; timer_grab -- read a consistent 64-bit timer value from the timer at
; r3.  Retries until HIGH matches HIGHCHK (see timer_update's store
; order).  Returns high word in r3, low word in r4.
;
; Force a line boundary here
        .align  5
        .globl  EXT(timer_grab)

LEXT(timer_grab)
0:      lwz     r11,TIMER_HIGH(r3)          ; Get the high word
        lwz     r4,TIMER_LOW(r3)            ; Get the low word
        isync                               ; Force the check-field load after the pair
        lwz     r9,TIMER_HIGHCHK(r3)        ; Get the check copy of the high word
        cmpw    r11,r9                      ; Did an update race us?
        bne--   0b                          ; Yes, try again...
        mr      r3,r11                      ; Return high word in r3
        blr
de355530 764
91447636
A
;
; timer_event -- accumulate elapsed time into the current per-processor
; timer and switch to a new one.  r3 presumably = event timestamp and
; r4 = new timer to make current (stored to CURRENT_TIMER/TIMER_TSTAMP
; below) -- TODO confirm against C callers.  Handles low-word wrap by
; bumping TIMER_HIGH with the HIGHCHK protocol used by timer_grab.
;
; Force a line boundary here
        .align  5
        .globl  EXT(timer_event)

LEXT(timer_event)
        mfsprg  r10,1                       ; Get the current activation
        lwz     r10,ACT_PER_PROC(r10)       ; Get the per_proc block
        addi    r10,r10,PP_PROCESSOR        ; Point at the embedded processor structure
        lwz     r11,CURRENT_TIMER(r10)      ; Get the currently active timer

        lwz     r9,TIMER_LOW(r11)           ; Accumulated low word so far
        lwz     r2,TIMER_TSTAMP(r11)        ; Timestamp when this timer went active
        add     r0,r9,r3                    ; Add new timestamp to accumulated total
        subf    r5,r2,r0                    ; Subtract start stamp: new low-word total
        cmplw   r5,r9                       ; Did the low word wrap (new < old)?
        bge++   0f                          ; No carry, take the short path...

        lwz     r6,TIMER_HIGH(r11)          ; Carry: bump the high word
        addi    r6,r6,1
        stw     r6,TIMER_HIGHCHK(r11)       ; Publish via check field first (see timer_update)
        eieio
        stw     r5,TIMER_LOW(r11)
        eieio
        stw     r6,TIMER_HIGH(r11)
        b       1f

0:      stw     r5,TIMER_LOW(r11)           ; No carry: just update the low word

1:      stw     r4,CURRENT_TIMER(r10)       ; Make the new timer current
        stw     r3,TIMER_TSTAMP(r4)         ; Stamp it with the event time
        blr
de355530 796
1c79356b
A
797/* Set machine into idle power-saving mode.
798 *
91447636 799 * void machine_idle(void)
1c79356b
A
800 *
801 * We will use the PPC NAP or DOZE for this.
802 * This call always returns. Must be called with spllo (i.e., interruptions
803 * enabled).
804 *
805 */
806
1c79356b
A
;
; void machine_idle(void)
; Put the processor into NAP or DOZE power saving.  Must be called with
; interruptions enabled; always returns.  The interrupt handler clears
; the nap/doze HID bits and forces a return to machine_idle_ret with
; r7 = the exit MSR built below.
;
; Force a line boundary here
        .align  5
        .globl  EXT(machine_idle)

LEXT(machine_idle)

        mfsprg  r12,1                       ; Get the current activation
        lwz     r12,ACT_PER_PROC(r12)       ; Get the per_proc block
        lhz     r10,PP_CPU_FLAGS(r12)       ; Get the flags
        lwz     r11,PP_INTS_ENABLED(r12)    ; Get interrupt enabled state
        andi.   r10,r10,SignalReady         ; Are Signal ready?
        cmpwi   cr1,r11,0                   ; Are interrupts disabled?
        cror    cr0_eq, cr1_eq, cr0_eq      ; Interrupt disabled or Signal not ready?
        mfmsr   r3                          ; Save the MSR

        beq--   nonap                       ; Yes, return after re-enabling interrupts
        lis     r0,hi16(MASK(MSR_VEC))      ; Get the vector flag
        ori     r0,r0,lo16(MASK(MSR_FP))    ; Add the FP flag
        andc    r3,r3,r0                    ; Clear VEC and FP
        ori     r0,r0,lo16(MASK(MSR_EE))    ; Drop EE also
        andc    r5,r3,r0                    ; Clear VEC, FP, and EE

        mtmsr   r5                          ; Hold up interruptions for now
        isync                               ; May have messed with fp/vec
        mfsprg  r11,2                       ; Get CPU specific features
        mfspr   r6,hid0                     ; Get the current power-saving mode
        mtcrf   0xC7,r11                    ; Get the facility flags

        lis     r4,hi16(napm)               ; Assume we can nap
        bt      pfWillNapb,yesnap           ; Yeah, nap is ok...

        lis     r4,hi16(dozem)              ; Assume we can doze
        bt      pfCanDozeb,yesnap           ; We can sleep or doze on this machine...

nonap:  ori     r3,r3,lo16(MASK(MSR_EE))    ; Flip on EE

        mtmsr   r3                          ; Turn interruptions back on
        blr                                 ; Leave...

yesnap: mftbu   r9                          ; Get the upper timebase
        mftb    r7                          ; Get the lower timebase
        mftbu   r8                          ; Get the upper one again
        cmplw   r9,r8                       ; Did the top tick?
        bne--   yesnap                      ; Yeah, need to get it again...
        stw     r8,napStamp(r12)            ; Set high order time stamp
        stw     r7,napStamp+4(r12)          ; Set low order nap stamp

        rlwinm. r0,r11,0,pfAltivecb,pfAltivecb  ; Do we have altivec?
        beq--   minovec                     ; No...
        dssall                              ; Stop the streams before we nap/doze
        sync
        lwz     r8,napStamp(r12)            ; Reload high order time stamp
clearpipe:
        cmplw   r8,r8                       ; Always-equal compare to drain the pipe
        bne-    clearpipe                   ; Never taken
        isync

minovec:
        rlwinm. r7,r11,0,pfNoL2PFNapb,pfNoL2PFNapb  ; Turn off L2 Prefetch before nap?
        beq++   miL2PFok

        mfspr   r7,msscr0                   ; Get current MSSCR0 value
        rlwinm  r7,r7,0,0,l2pfes-1          ; Disable L2 Prefetch
        mtspr   msscr0,r7                   ; Update MSSCR0 value
        sync
        isync

miL2PFok:
        rlwinm. r7,r11,0,pfSlowNapb,pfSlowNapb  ; Should nap at slow speed?
        beq     minoslownap

        mfspr   r7,hid1                     ; Get current HID1 value
        oris    r7,r7,hi16(hid1psm)         ; Select PLL1
        mtspr   hid1,r7                     ; Update HID1 value

;
; We have to open up interruptions here because book 4 says that we should
; turn on only the POW bit and that we should have interrupts enabled.
; The interrupt handler will detect that nap or doze is set if an interrupt
; is taken and set everything up to return directly to machine_idle_ret.
; So, make sure everything we need there is already set up...
;

minoslownap:
        lis     r10,hi16(dozem|napm|sleepm) ; Mask of power management bits

        bf--    pf64Bitb,mipNSF1            ; skip if 32-bit...

        sldi    r4,r4,32                    ; Position the flags
        sldi    r10,r10,32                  ; Position the masks

mipNSF1:
        li      r2,lo16(MASK(MSR_DR)|MASK(MSR_IR))  ; Get the translation mask
        andc    r6,r6,r10                   ; Clean up the old power bits
        ori     r7,r5,lo16(MASK(MSR_EE))    ; Flip on EE to make exit msr
        andc    r5,r5,r2                    ; Clear IR and DR from current MSR
        or      r6,r6,r4                    ; Set nap or doze
        ori     r5,r5,lo16(MASK(MSR_EE))    ; Flip on EE to make nap msr
        oris    r2,r5,hi16(MASK(MSR_POW))   ; Turn on power management in next MSR

        sync
        mtspr   hid0,r6                     ; Set up the HID for nap/doze
        mfspr   r6,hid0                     ; Yes, this is silly, keep it here
        mfspr   r6,hid0                     ; Yes, this is a duplicate, keep it here
        mfspr   r6,hid0                     ; Yes, this is a duplicate, keep it here
        mfspr   r6,hid0                     ; Yes, this is a duplicate, keep it here
        mfspr   r6,hid0                     ; Yes, this is a duplicate, keep it here
        mfspr   r6,hid0                     ; Yes, this is a duplicate, keep it here
        isync                               ; Make sure it is set

;
; Turn translation off to nap
;

        bt      pfNoMSRirb,miNoMSR          ; Jump if we need to use SC for this...
        mtmsr   r5                          ; Turn translation off, interrupts on
        isync                               ; Wait for it
        b       miNoMSRx                    ; Jump back in line...

miNoMSR:
        mr      r3,r5                       ; Pass in the new MSR value
        li      r0,loadMSR                  ; MSR setter ultrafast
        sc                                  ; Do it to it like you never done before...

miNoMSRx:
        bf--    pf64Bitb,mipowloop          ; skip if 32-bit...

        li      r3,0x10                     ; Fancy nap threshold is 0x10 ticks
        mftb    r8                          ; Get the low half of the time base
        mfdec   r4                          ; Get the decrementer ticks
        cmplw   r4,r3                       ; Less than threshold?
        blt     mipowloop

        mtdec   r3                          ; Load decrementer with threshold
        isync                               ; and make sure,
        mfdec   r3                          ; really sure, it gets there

        rlwinm  r6,r2,0,MSR_EE_BIT+1,MSR_EE_BIT-1   ; Clear out the EE bit
        sync                                ; Make sure queues are clear
        mtmsr   r6                          ; Set MSR with EE off but POW on
        isync                               ; Make sure this takes before we proceed

        mftb    r9                          ; Get the low half of the time base
        sub     r9,r9,r8                    ; Get the number of ticks spent waiting
        sub     r4,r4,r9                    ; Adjust the decrementer value

        mtdec   r4                          ; Load decrementer with the rest of the timeout
        isync                               ; and make sure,
        mfdec   r4                          ; really sure, it gets there

mipowloop:
        sync                                ; Make sure queues are clear
        mtmsr   r2                          ; Nap or doze, MSR with POW, EE set, translation off
        isync                               ; Make sure this takes before we proceed
        b       mipowloop                   ; loop if POW does not take

;
; Note that the interrupt handler will turn off the nap/doze bits in the hid.
; Also remember that the interrupt handler will force return to here whenever
; the nap/doze bits are set.
;
        .globl  EXT(machine_idle_ret)
LEXT(machine_idle_ret)
        mtmsr   r7                          ; Make sure the MSR is what we want
        isync                               ; In case we turn on translation

;
; Protect against a lost decrementer trap if the current decrementer value is negative
; by more than 10 ticks, re-arm it since it is unlikely to fire at this point...
; A hardware interrupt got us out of machine_idle and may also be contributing to this state
;
        mfdec   r6                          ; Get decrementer
        cmpwi   cr0,r6,-10                  ; Compare decrementer with -10
        bgelr++                             ; Return if greater
        li      r0,1                        ; Load 1
        mtdec   r0                          ; Set decrementer to 1
        blr                                 ; Return...
981
982/* Put machine to sleep.
983 * This call never returns. We always exit sleep via a soft reset.
984 * All external interruptions must be drained at this point and disabled.
985 *
91447636 986 * void ml_ppc_do_sleep(void)
1c79356b
A
987 *
988 * We will use the PPC SLEEP for this.
989 *
990 * There is one bit of hackery in here: we need to enable for
991 * interruptions when we go to sleep and there may be a pending
91447636
A
992 * decrimenter rupt. So we make the decrimenter 0x7FFFFFFF and enable for
993 * interruptions. The decrimenter rupt vector recognizes this and returns
1c79356b
A
994 * directly back here.
995 *
996 */
997
998; Force a line boundry here
999 .align 5
91447636 1000 .globl EXT(ml_ppc_do_sleep)
3a60a9f5 1001
91447636
A
1002LEXT(ml_ppc_do_sleep)
1003
1c79356b
A
1004#if 0
1005 mfmsr r5 ; Hack to spin instead of sleep
1006 rlwinm r5,r5,0,MSR_DR_BIT+1,MSR_IR_BIT-1 ; Turn off translation
1007 rlwinm r5,r5,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions
1008 mtmsr r5 ; No talking
1009 isync
1010
1c79356b
A
1011deadsleep: addi r3,r3,1 ; Make analyzer happy
1012 addi r3,r3,1
1013 addi r3,r3,1
1014 b deadsleep ; Die the death of 1000 joys...
1015#endif
1016
91447636
A
1017 mfsprg r12,1 ; Get the current activation
1018 lwz r12,ACT_PER_PROC(r12) ; Get the per_proc block
1c79356b 1019 mfsprg r11,2 ; Get CPU specific features
91447636
A
1020 eqv r10,r10,r10 ; Get all foxes
1021 mtcrf 0x04,r11 ; move pfNoMSRirb to cr5
1022 mfspr r4,hid0 ; Get the current power-saving mode
1023 mtcrf 0x02,r11 ; move pf64Bit to cr6
9bccf70c 1024
55e303ae 1025 rlwinm. r5,r11,0,pfNoL2PFNapb,pfNoL2PFNapb ; Turn off L2 Prefetch before sleep?
9bccf70c
A
1026 beq mpsL2PFok
1027
1028 mfspr r5,msscr0 ; Get currect MSSCR0 value
55e303ae 1029 rlwinm r5,r5,0,0,l2pfes-1 ; Disable L2 Prefetch
9bccf70c
A
1030 mtspr msscr0,r5 ; Updates MSSCR0 value
1031 sync
1032 isync
1033
1034mpsL2PFok:
91447636 1035 bt++ pf64Bitb,mpsPF64bit ; PM bits are shifted on 64bit systems.
55e303ae
A
1036
1037 rlwinm r4,r4,0,sleep+1,doze-1 ; Clear all possible power-saving modes (not DPM though)
1038 oris r4,r4,hi16(sleepm) ; Set sleep
4a249263 1039 b mpsClearDEC
55e303ae
A
1040
1041mpsPF64bit:
4a249263 1042 lis r5, hi16(dozem|napm|sleepm) ; Clear all possible power-saving modes (not DPM though)
55e303ae
A
1043 sldi r5, r5, 32
1044 andc r4, r4, r5
4a249263 1045 lis r5, hi16(napm) ; Set sleep
55e303ae 1046 sldi r5, r5, 32
4a249263 1047 or r4, r4, r5
55e303ae
A
1048
1049mpsClearDEC:
1c79356b
A
1050 mfmsr r5 ; Get the current MSR
1051 rlwinm r10,r10,0,1,31 ; Make 0x7FFFFFFF
91447636 1052 mtdec r10 ; Load decrimenter with 0x7FFFFFFF
1c79356b
A
1053 isync ; and make sure,
1054 mfdec r9 ; really sure, it gets there
1055
91447636 1056 li r2,1 ; Prepare for 64 bit
1c79356b
A
1057 rlwinm r5,r5,0,MSR_DR_BIT+1,MSR_IR_BIT-1 ; Turn off translation
1058;
1059; Note that we need translation off before we set the HID to sleep. Otherwise
1060; we will ignore any PTE misses that occur and cause an infinite loop.
1061;
91447636
A
1062 bf++ pf64Bitb,mpsCheckMSR ; check 64-bit processor
1063 rldimi r5,r2,63,MSR_SF_BIT ; set SF bit (bit 0)
1064 mtmsrd r5 ; set 64-bit mode, turn off EE, DR, and IR
1065 isync ; Toss prefetch
1066 b mpsNoMSRx
1067
1068mpsCheckMSR:
1c79356b
A
1069 bt pfNoMSRirb,mpsNoMSR ; No MSR...
1070
1071 mtmsr r5 ; Translation off
1072 isync ; Toss prefetch
1073 b mpsNoMSRx
1074
1075mpsNoMSR:
1076 li r0,loadMSR ; Get the MSR setter SC
1077 mr r3,r5 ; Get new MSR
1078 sc ; Set it
1079mpsNoMSRx:
1080
1081 ori r3,r5,lo16(MASK(MSR_EE)) ; Flip on EE
1082 sync
1083 mtspr hid0,r4 ; Set up the HID to sleep
55e303ae
A
1084 mfspr r4,hid0 ; Yes, this is silly, keep it here
1085 mfspr r4,hid0 ; Yes, this is a duplicate, keep it here
1086 mfspr r4,hid0 ; Yes, this is a duplicate, keep it here
1087 mfspr r4,hid0 ; Yes, this is a duplicate, keep it here
1088 mfspr r4,hid0 ; Yes, this is a duplicate, keep it here
1089 mfspr r4,hid0 ; Yes, this is a duplicate, keep it here
1c79356b 1090
91447636 1091 mtmsr r3 ; Enable for interrupts to drain decrimenter
1c79356b
A
1092
1093 add r6,r4,r5 ; Just waste time
1094 add r6,r6,r4 ; A bit more
1095 add r6,r6,r5 ; A bit more
1096
1097 mtmsr r5 ; Interruptions back off
1098 isync ; Toss prefetch
1099
1c79356b
A
1100;
1101; We are here with translation off, interrupts off, all possible
91447636 1102; interruptions drained off, and a decrimenter that will not pop.
1c79356b
A
1103;
1104
1105 bl EXT(cacheInit) ; Clear out the caches. This will leave them on
1106 bl EXT(cacheDisable) ; Turn off all caches
1107
1108 mfmsr r5 ; Get the current MSR
1109 oris r5,r5,hi16(MASK(MSR_POW)) ; Turn on power management in next MSR
1110 ; Leave EE off because power goes off shortly
55e303ae
A
1111 mfsprg r12,0 ; Get the per_proc_info
1112 li r10,PP_CPU_FLAGS
1113 lhz r11,PP_CPU_FLAGS(r12) ; Get the flags
1114 ori r11,r11,SleepState ; Marked SleepState
1115 sth r11,PP_CPU_FLAGS(r12) ; Set the flags
1116 dcbf r10,r12
4a249263
A
1117
1118 mfsprg r11,2 ; Get CPU specific features
1119 rlwinm. r0,r11,0,pf64Bitb,pf64Bitb ; Test for 64 bit processor
1120 eqv r4,r4,r4 ; Get all foxes
1121 rlwinm r4,r4,0,1,31 ; Make 0x7FFFFFFF
1122 beq slSleepNow ; skip if 32-bit...
91447636
A
1123 li r3, 0x4000 ; Cause decrimenter to roll over soon
1124 mtdec r3 ; Load decrimenter with 0x00004000
4a249263
A
1125 isync ; and make sure,
1126 mfdec r3 ; really sure, it gets there
1127
55e303ae
A
1128slSleepNow:
1129 sync ; Sync it all up
1c79356b
A
1130 mtmsr r5 ; Do sleep with interruptions enabled
1131 isync ; Take a pill
91447636 1132 mtdec r4 ; Load decrimenter with 0x7FFFFFFF
4a249263
A
1133 isync ; and make sure,
1134 mfdec r3 ; really sure, it gets there
1c79356b
A
1135 b slSleepNow ; Go back to sleep if we wake up...
1136
1137
1138
1139/* Initialize all caches including the TLBs
1140 *
1141 * void cacheInit(void)
1142 *
1143 * This is used to force the caches to an initial clean state. First, we
1144 * check if the cache is on, if so, we need to flush the contents to memory.
1145 * Then we invalidate the L1. Next, we configure and invalidate the L2 etc.
1146 * Finally we turn on all of the caches
1147 *
1148 * Note that if translation is not disabled when this is called, the TLB will not
1149 * be completely clear after return.
1150 *
1151 */
1152
1153; Force a line boundry here
1154 .align 5
1155 .globl EXT(cacheInit)
1156
1157LEXT(cacheInit)
1158
1159 mfsprg r12,0 ; Get the per_proc_info
1160 mfspr r9,hid0 ; Get the current power-saving mode
1161
1162 mfsprg r11,2 ; Get CPU specific features
1163 mfmsr r7 ; Get the current MSR
9bccf70c
A
1164 rlwinm r7,r7,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1165 rlwinm r7,r7,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
0b4e3aa0 1166 rlwimi r11,r11,pfLClckb+1,31,31 ; Move pfLClck to another position (to keep from using non-volatile CRs)
1c79356b
A
1167 rlwinm r5,r7,0,MSR_DR_BIT+1,MSR_IR_BIT-1 ; Turn off translation
1168 rlwinm r5,r5,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions
1169 mtcrf 0x87,r11 ; Get the feature flags
55e303ae
A
1170 lis r10,hi16(dozem|napm|sleepm|dpmm) ; Mask of power management bits
1171 bf-- pf64Bitb,cIniNSF1 ; Skip if 32-bit...
1172
1173 sldi r10,r10,32 ; Position the masks
1174
1175cIniNSF1: andc r4,r9,r10 ; Clean up the old power bits
1c79356b 1176 mtspr hid0,r4 ; Set up the HID
55e303ae
A
1177 mfspr r4,hid0 ; Yes, this is silly, keep it here
1178 mfspr r4,hid0 ; Yes, this is a duplicate, keep it here
1179 mfspr r4,hid0 ; Yes, this is a duplicate, keep it here
1180 mfspr r4,hid0 ; Yes, this is a duplicate, keep it here
1181 mfspr r4,hid0 ; Yes, this is a duplicate, keep it here
1182 mfspr r4,hid0 ; Yes, this is a duplicate, keep it here
1c79356b
A
1183
1184 bt pfNoMSRirb,ciNoMSR ; No MSR...
1185
1186 mtmsr r5 ; Translation and all off
1187 isync ; Toss prefetch
1188 b ciNoMSRx
1189
1190ciNoMSR:
1191 li r0,loadMSR ; Get the MSR setter SC
1192 mr r3,r5 ; Get new MSR
1193 sc ; Set it
1194ciNoMSRx:
1195
1196 bf pfAltivecb,cinoDSS ; No Altivec here...
1197
1198 dssall ; Stop streams
1199 sync
1200
55e303ae 1201cinoDSS: li r5,tlbieLock ; Get the TLBIE lock
1c79356b 1202 li r0,128 ; Get number of TLB entries
1c79356b
A
1203
1204 li r6,0 ; Start at 0
55e303ae
A
1205 bf-- pf64Bitb,citlbhang ; Skip if 32-bit...
1206 li r0,1024 ; Get the number of TLB entries
1c79356b
A
1207
1208citlbhang: lwarx r2,0,r5 ; Get the TLBIE lock
1209 mr. r2,r2 ; Is it locked?
1210 bne- citlbhang ; It is locked, go wait...
1211 stwcx. r0,0,r5 ; Try to get it
1212 bne- citlbhang ; We was beat...
1213
1214 mtctr r0 ; Set the CTR
1215
1216cipurgeTLB: tlbie r6 ; Purge this entry
1217 addi r6,r6,4096 ; Next page
1218 bdnz cipurgeTLB ; Do them all...
1219
1220 mtcrf 0x80,r11 ; Set SMP capability
1221 sync ; Make sure all TLB purges are done
1222 eieio ; Order, order in the court
1223
1224 bf pfSMPcapb,cinoSMP ; SMP incapable...
1225
1226 tlbsync ; Sync all TLBs
1227 sync
150bd074 1228 isync
1c79356b 1229
55e303ae
A
1230 bf-- pf64Bitb,cinoSMP ; Skip if 32-bit...
1231 ptesync ; Wait for quiet again
1232 sync
1233
1234cinoSMP: stw r2,tlbieLock(0) ; Unlock TLBIE lock
1235
1236 bt++ pf64Bitb,cin64 ; Skip if 64-bit...
1c79356b 1237
1c79356b
A
1238 rlwinm. r0,r9,0,ice,dce ; Were either of the level 1s on?
1239 beq- cinoL1 ; No, no need to flush...
1240
55e303ae
A
1241 rlwinm. r0,r11,0,pfL1fab,pfL1fab ; do we have L1 flush assist?
1242 beq ciswdl1 ; If no hw flush assist, go do by software...
1c79356b
A
1243
1244 mfspr r8,msscr0 ; Get the memory system control register
1245 oris r8,r8,hi16(dl1hwfm) ; Turn on the hardware flush request
1246
1247 mtspr msscr0,r8 ; Start the flush operation
1248
1249ciwdl1f: mfspr r8,msscr0 ; Get the control register again
1250
1251 rlwinm. r8,r8,0,dl1hwf,dl1hwf ; Has the flush request been reset yet?
1252 bne ciwdl1f ; No, flush is still in progress...
1253 b ciinvdl1 ; Go invalidate l1...
1254
1255;
1256; We need to either make this very complicated or to use ROM for
1257; the flush. The problem is that if during the following sequence a
1258; snoop occurs that invalidates one of the lines in the cache, the
1259; PLRU sequence will be altered making it possible to miss lines
1260; during the flush. So, we either need to dedicate an area of RAM
1261; to each processor, lock use of a RAM area, or use ROM. ROM is
1262; by far the easiest. Note that this is not an issue for machines
1263; that have harware flush assists.
1264;
1265
1266ciswdl1: lwz r0,pfl1dSize(r12) ; Get the level 1 cache size
0b4e3aa0
A
1267
1268 bf 31,cisnlck ; Skip if pfLClck not set...
1269
91447636
A
1270 mfspr r4,msscr0 ; ?
1271 rlwinm r6,r4,0,0,l2pfes-1 ; ?
0b4e3aa0
A
1272 mtspr msscr0,r6 ; Set it
1273 sync
1274 isync
1275
1276 mfspr r8,ldstcr ; Save the LDSTCR
1277 li r2,1 ; Get a mask of 0x01
1278 lis r3,0xFFF0 ; Point to ROM
1279 rlwinm r11,r0,29,3,31 ; Get the amount of memory to handle all indexes
1280
1281 li r6,0 ; Start here
1282
1283cisiniflsh: dcbf r6,r3 ; Flush each line of the range we use
1284 addi r6,r6,32 ; Bump to the next
1285 cmplw r6,r0 ; Have we reached the end?
1286 blt+ cisiniflsh ; Nope, continue initial flush...
1287
1288 sync ; Make sure it is done
1289
1290 addi r11,r11,-1 ; Get mask for index wrap
1291 li r6,0 ; Get starting offset
1292
1293cislckit: not r5,r2 ; Lock all but 1 way
1294 rlwimi r5,r8,0,0,23 ; Build LDSTCR
1295 mtspr ldstcr,r5 ; Lock a way
1296 sync ; Clear out memory accesses
1297 isync ; Wait for all
1298
1299
1300cistouch: lwzx r10,r3,r6 ; Pick up some trash
1301 addi r6,r6,32 ; Go to the next index
1302 and. r0,r6,r11 ; See if we are about to do next index
1303 bne+ cistouch ; Nope, do more...
1304
1305 sync ; Make sure it is all done
1306 isync
1307
1308 sub r6,r6,r11 ; Back up to start + 1
1309 addi r6,r6,-1 ; Get it right
1310
1311cisflush: dcbf r3,r6 ; Flush everything out
1312 addi r6,r6,32 ; Go to the next index
1313 and. r0,r6,r11 ; See if we are about to do next index
1314 bne+ cisflush ; Nope, do more...
1315
1316 sync ; Make sure it is all done
1317 isync
1318
1319
1320 rlwinm. r2,r2,1,24,31 ; Shift to next way
1321 bne+ cislckit ; Do this for all ways...
1322
1323 mtspr ldstcr,r8 ; Slam back to original
1324 sync
1325 isync
1326
91447636 1327 mtspr msscr0,r4 ; ?
0b4e3aa0
A
1328 sync
1329 isync
1330
1331 b cinoL1 ; Go on to level 2...
1332
1333
1334cisnlck: rlwinm r2,r0,0,1,30 ; Double cache size
1c79356b
A
1335 add r0,r0,r2 ; Get 3 times cache size
1336 rlwinm r0,r0,26,6,31 ; Get 3/2 number of cache lines
1337 lis r3,0xFFF0 ; Dead recon ROM address for now
1338 mtctr r0 ; Number of lines to flush
1339
1340ciswfldl1a: lwz r2,0(r3) ; Flush anything else
1341 addi r3,r3,32 ; Next line
1342 bdnz ciswfldl1a ; Flush the lot...
1343
1344ciinvdl1: sync ; Make sure all flushes have been committed
1345
1346 mfspr r8,hid0 ; Get the HID0 bits
1347 rlwinm r8,r8,0,dce+1,ice-1 ; Clear cache enables
1348 mtspr hid0,r8 ; and turn off L1 cache
1349 sync ; Make sure all is done
0b4e3aa0
A
1350 isync
1351
1c79356b
A
1352 ori r8,r8,lo16(icem|dcem|icfim|dcfim) ; Set the HID0 bits for enable, and invalidate
1353 sync
1354 isync
1355
1356 mtspr hid0,r8 ; Start the invalidate and turn on cache
1357 rlwinm r8,r8,0,dcfi+1,icfi-1 ; Turn off the invalidate bits
1358 mtspr hid0,r8 ; Turn off the invalidate (needed for some older machines)
1359 sync
0b4e3aa0 1360
1c79356b
A
1361
1362cinoL1:
1363;
1364; Flush and disable the level 2
1365;
55e303ae
A
1366 mfsprg r10,2 ; need to check 2 features we did not put in CR
1367 rlwinm. r0,r10,0,pfL2b,pfL2b ; do we have L2?
1368 beq cinol2 ; No level 2 cache to flush
1c79356b
A
1369
1370 mfspr r8,l2cr ; Get the L2CR
1371 lwz r3,pfl2cr(r12) ; Get the L2CR value
d52fe63f
A
1372 rlwinm. r0,r8,0,l2e,l2e ; Was the L2 enabled?
1373 bne ciflushl2 ; Yes, force flush
1374 cmplwi r8, 0 ; Was the L2 all the way off?
1375 beq ciinvdl2 ; Yes, force invalidate
1c79356b
A
1376 lis r0,hi16(l2sizm|l2clkm|l2ramm|l2ohm) ; Get confiuration bits
1377 xor r2,r8,r3 ; Get changing bits?
1378 ori r0,r0,lo16(l2slm|l2dfm|l2bypm) ; More config bits
1379 and. r0,r0,r2 ; Did any change?
1380 bne- ciinvdl2 ; Yes, just invalidate and get PLL synced...
1381
d52fe63f 1382ciflushl2:
55e303ae
A
1383 rlwinm. r0,r10,0,pfL2fab,pfL2fab ; hardware-assisted L2 flush?
1384 beq ciswfl2 ; Flush not in hardware...
1c79356b 1385
d52fe63f 1386 mr r10,r8 ; Take a copy now
1c79356b 1387
0b4e3aa0 1388 bf 31,cinol2lck ; Skip if pfLClck not set...
1c79356b
A
1389
1390 oris r10,r10,hi16(l2ionlym|l2donlym) ; Set both instruction- and data-only
1391 sync
1392 mtspr l2cr,r10 ; Lock out the cache
1393 sync
1394 isync
1395
1396cinol2lck: ori r10,r10,lo16(l2hwfm) ; Request flush
1397 sync ; Make sure everything is done
1398
1399 mtspr l2cr,r10 ; Request flush
1400
1401cihwfl2: mfspr r10,l2cr ; Get back the L2CR
1402 rlwinm. r10,r10,0,l2hwf,l2hwf ; Is the flush over?
1403 bne+ cihwfl2 ; Nope, keep going...
1404 b ciinvdl2 ; Flush done, go invalidate L2...
1405
1406ciswfl2:
1407 lwz r0,pfl2Size(r12) ; Get the L2 size
d52fe63f 1408 oris r2,r8,hi16(l2dom) ; Set L2 to data only mode
0b4e3aa0
A
1409
1410 b ciswfl2doa ; Branch to next line...
1411
1412 .align 5
1413ciswfl2doc:
1414 mtspr l2cr,r2 ; Disable L2
1415 sync
1416 isync
1417 b ciswfl2dod ; It is off, go invalidate it...
1418
1419ciswfl2doa:
1420 b ciswfl2dob ; Branch to next...
1421
1422ciswfl2dob:
1423 sync ; Finish memory stuff
1424 isync ; Stop speculation
1425 b ciswfl2doc ; Jump back up and turn on data only...
1426ciswfl2dod:
1c79356b
A
1427 rlwinm r0,r0,27,5,31 ; Get the number of lines
1428 lis r10,0xFFF0 ; Dead recon ROM for now
1429 mtctr r0 ; Set the number of lines
1430
1431ciswfldl2a: lwz r0,0(r10) ; Load something to flush something
1432 addi r10,r10,32 ; Next line
1433 bdnz ciswfldl2a ; Do the lot...
1434
55e303ae 1435ciinvdl2: rlwinm r8,r3,0,l2e+1,31 ; Clear the enable bit
1c79356b
A
1436 b cinla ; Branch to next line...
1437
1438 .align 5
d52fe63f 1439cinlc: mtspr l2cr,r8 ; Disable L2
1c79356b
A
1440 sync
1441 isync
1442 b ciinvl2 ; It is off, go invalidate it...
1443
1444cinla: b cinlb ; Branch to next...
1445
1446cinlb: sync ; Finish memory stuff
1447 isync ; Stop speculation
1448 b cinlc ; Jump back up and turn off cache...
1449
1450ciinvl2: sync
1451 isync
d52fe63f
A
1452
1453 cmplwi r3, 0 ; Should the L2 be all the way off?
1454 beq cinol2 ; Yes, done with L2
1455
1456 oris r2,r8,hi16(l2im) ; Get the invalidate flag set
1c79356b
A
1457
1458 mtspr l2cr,r2 ; Start the invalidate
1459 sync
1460 isync
1461ciinvdl2a: mfspr r2,l2cr ; Get the L2CR
55e303ae
A
1462 mfsprg r0,2 ; need to check a feature in "non-volatile" set
1463 rlwinm. r0,r0,0,pfL2ib,pfL2ib ; flush in HW?
1464 beq ciinvdl2b ; Flush not in hardware...
1c79356b
A
1465 rlwinm. r2,r2,0,l2i,l2i ; Is the invalidate still going?
1466 bne+ ciinvdl2a ; Assume so, this will take a looong time...
1467 sync
1468 b cinol2 ; No level 2 cache to flush
1469ciinvdl2b:
1470 rlwinm. r2,r2,0,l2ip,l2ip ; Is the invalidate still going?
1471 bne+ ciinvdl2a ; Assume so, this will take a looong time...
1472 sync
d52fe63f 1473 mtspr l2cr,r8 ; Turn off the invalidate request
1c79356b
A
1474
1475cinol2:
1476
1477;
1478; Flush and enable the level 3
1479;
1480 bf pfL3b,cinol3 ; No level 3 cache to flush
1481
1482 mfspr r8,l3cr ; Get the L3CR
1483 lwz r3,pfl3cr(r12) ; Get the L3CR value
d52fe63f
A
1484 rlwinm. r0,r8,0,l3e,l3e ; Was the L3 enabled?
1485 bne ciflushl3 ; Yes, force flush
1486 cmplwi r8, 0 ; Was the L3 all the way off?
1487 beq ciinvdl3 ; Yes, force invalidate
1c79356b
A
1488 lis r0,hi16(l3pem|l3sizm|l3dxm|l3clkm|l3spom|l3ckspm) ; Get configuration bits
1489 xor r2,r8,r3 ; Get changing bits?
1490 ori r0,r0,lo16(l3pspm|l3repm|l3rtm|l3cyam|l3dmemm|l3dmsizm) ; More config bits
1491 and. r0,r0,r2 ; Did any change?
1492 bne- ciinvdl3 ; Yes, just invalidate and get PLL synced...
1493
d52fe63f 1494ciflushl3:
1c79356b 1495 sync ; 7450 book says do this even though not needed
d52fe63f 1496 mr r10,r8 ; Take a copy now
1c79356b
A
1497
1498 bf 31,cinol3lck ; Skip if pfL23lck not set...
1499
1500 oris r10,r10,hi16(l3iom) ; Set instruction-only
1501 ori r10,r10,lo16(l3donlym) ; Set data-only
1502 sync
1503 mtspr l3cr,r10 ; Lock out the cache
1504 sync
1505 isync
1506
1507cinol3lck: ori r10,r10,lo16(l3hwfm) ; Request flush
1508 sync ; Make sure everything is done
1509
1510 mtspr l3cr,r10 ; Request flush
1511
1512cihwfl3: mfspr r10,l3cr ; Get back the L3CR
1513 rlwinm. r10,r10,0,l3hwf,l3hwf ; Is the flush over?
1514 bne+ cihwfl3 ; Nope, keep going...
1515
55e303ae 1516ciinvdl3: rlwinm r8,r3,0,l3e+1,31 ; Clear the enable bit
1c79356b 1517 sync ; Make sure of life, liberty, and justice
d52fe63f 1518 mtspr l3cr,r8 ; Disable L3
1c79356b
A
1519 sync
1520
d52fe63f
A
1521 cmplwi r3, 0 ; Should the L3 be all the way off?
1522 beq cinol3 ; Yes, done with L3
1c79356b 1523
d52fe63f 1524 ori r8,r8,lo16(l3im) ; Get the invalidate flag set
1c79356b 1525
d52fe63f
A
1526 mtspr l3cr,r8 ; Start the invalidate
1527
1528ciinvdl3b: mfspr r8,l3cr ; Get the L3CR
1529 rlwinm. r8,r8,0,l3i,l3i ; Is the invalidate still going?
1c79356b
A
1530 bne+ ciinvdl3b ; Assume so...
1531 sync
1532
91447636
A
1533 lwz r10, pfBootConfig(r12) ; ?
1534 rlwinm. r10, r10, 24, 28, 31 ; ?
1535 beq ciinvdl3nopdet ; ?
1536
1537 mfspr r8,l3pdet ; ?
1538 srw r2, r8, r10 ; ?
1539 rlwimi r2, r8, 0, 24, 31 ; ?
1540 subfic r10, r10, 32 ; ?
1541 li r8, -1 ; ?
1542 ori r2, r2, 0x0080 ; ?
1543 slw r8, r8, r10 ; ?
1544 or r8, r2, r8 ; ?
1545 mtspr l3pdet, r8 ; ?
1c79356b
A
1546 isync
1547
7b1edb79 1548ciinvdl3nopdet:
d52fe63f
A
1549 mfspr r8,l3cr ; Get the L3CR
1550 rlwinm r8,r8,0,l3clken+1,l3clken-1 ; Clear the clock enable bit
1551 mtspr l3cr,r8 ; Disable the clock
1c79356b 1552
91447636
A
1553 li r2,128 ; ?
1554ciinvdl3c: addi r2,r2,-1 ; ?
1555 cmplwi r2,0 ; ?
1c79356b
A
1556 bne+ ciinvdl3c
1557
91447636
A
1558 mfspr r10,msssr0 ; ?
1559 rlwinm r10,r10,0,vgL3TAG+1,vgL3TAG-1 ; ?
1560 mtspr msssr0,r10 ; ?
1c79356b
A
1561 sync
1562
d52fe63f 1563 mtspr l3cr,r3 ; Enable it as desired
1c79356b
A
1564 sync
1565cinol3:
55e303ae
A
1566 mfsprg r0,2 ; need to check a feature in "non-volatile" set
1567 rlwinm. r0,r0,0,pfL2b,pfL2b ; is there an L2 cache?
1568 beq cinol2a ; No level 2 cache to enable
1c79356b
A
1569
1570 lwz r3,pfl2cr(r12) ; Get the L2CR value
d52fe63f 1571 cmplwi r3, 0 ; Should the L2 be all the way off?
55e303ae 1572 beq cinol2a : Yes, done with L2
d52fe63f 1573 mtspr l2cr,r3 ; Enable it as desired
1c79356b
A
1574 sync
1575
1576;
1577; Invalidate and turn on L1s
1578;
1579
0b4e3aa0
A
1580cinol2a:
1581 bt 31,cinoexit ; Skip if pfLClck set...
1582
1583 rlwinm r8,r9,0,dce+1,ice-1 ; Clear the I- and D- cache enables
1c79356b
A
1584 mtspr hid0,r8 ; Turn off dem caches
1585 sync
1586
1587 ori r8,r9,lo16(icem|dcem|icfim|dcfim) ; Set the HID0 bits for enable, and invalidate
1588 rlwinm r9,r8,0,dcfi+1,icfi-1 ; Turn off the invalidate bits
1589 sync
1590 isync
1591
1592 mtspr hid0,r8 ; Start the invalidate and turn on L1 cache
0b4e3aa0
A
1593
1594cinoexit: mtspr hid0,r9 ; Turn off the invalidate (needed for some older machines) and restore entry conditions
1c79356b
A
1595 sync
1596 mtmsr r7 ; Restore MSR to entry
1597 isync
1598 blr ; Return...
1599
1600
55e303ae
A
1601;
1602; Handle 64-bit architecture
1603; This processor can not run without caches, so we just push everything out
1604; and flush. It will be relativily clean afterwards
1605;
1606
1607 .align 5
1608
1609cin64:
55e303ae
A
1610 mfspr r10,hid1 ; Save hid1
1611 mfspr r4,hid4 ; Save hid4
1612 mr r12,r10 ; Really save hid1
1613 mr r11,r4 ; Get a working copy of hid4
1614
1615 li r0,0 ; Get a 0
1616 eqv r2,r2,r2 ; Get all foxes
1617
1618 rldimi r10,r0,55,7 ; Clear I$ prefetch bits (7:8)
1619
1620 isync
1621 mtspr hid1,r10 ; Stick it
1622 mtspr hid1,r10 ; Stick it again
1623 isync
1624
1625 rldimi r11,r2,38,25 ; Disable D$ prefetch (25:25)
1626
1627 sync
1628 mtspr hid4,r11 ; Stick it
1629 isync
1630
1631 li r3,8 ; Set bit 28+32
1632 sldi r3,r3,32 ; Make it bit 28
1633 or r3,r3,r11 ; Turn on the flash invalidate L1D$
1634
1635 oris r5,r11,0x0600 ; Set disable L1D$ bits
1636 sync
1637 mtspr hid4,r3 ; Invalidate
1638 isync
1639
1640 mtspr hid4,r5 ; Un-invalidate and disable L1D$
1641 isync
1642
1643 lis r8,GUSModeReg ; Get the GUS mode ring address
1644 mfsprg r0,2 ; Get the feature flags
1645 ori r8,r8,0x8000 ; Set to read data
1646 rlwinm. r0,r0,pfSCOMFixUpb+1,31,31 ; Set shift if we need a fix me up
1647
1648 sync
1649
1650 mtspr scomc,r8 ; Request the GUS mode
1651 mfspr r11,scomd ; Get the GUS mode
1652 mfspr r8,scomc ; Get back the status (we just ignore it)
1653 sync
1654 isync
1655
1656 sld r11,r11,r0 ; Fix up if needed
1657
1658 ori r6,r11,lo16(GUSMdmapen) ; Set the bit that means direct L2 cache address
1659 lis r8,GUSModeReg ; Get GUS mode register address
1660
1661 sync
1662
1663 mtspr scomd,r6 ; Set that we want direct L2 mode
1664 mtspr scomc,r8 ; Tell GUS we want direct L2 mode
1665 mfspr r3,scomc ; Get back the status
1666 sync
1667 isync
1668
1669 li r3,0 ; Clear start point
1670
1671cflushlp: lis r6,0x0040 ; Pick 4MB line as our target
1672 or r6,r6,r3 ; Put in the line offset
1673 lwz r5,0(r6) ; Load a line
1674 addis r6,r6,8 ; Roll bit 42:44
1675 lwz r5,0(r6) ; Load a line
1676 addis r6,r6,8 ; Roll bit 42:44
1677 lwz r5,0(r6) ; Load a line
1678 addis r6,r6,8 ; Roll bit 42:44
1679 lwz r5,0(r6) ; Load a line
1680 addis r6,r6,8 ; Roll bit 42:44
1681 lwz r5,0(r6) ; Load a line
1682 addis r6,r6,8 ; Roll bit 42:44
1683 lwz r5,0(r6) ; Load a line
1684 addis r6,r6,8 ; Roll bit 42:44
1685 lwz r5,0(r6) ; Load a line
1686 addis r6,r6,8 ; Roll bit 42:44
1687 lwz r5,0(r6) ; Load a line
1688
1689 addi r3,r3,128 ; Next line
1690 andis. r5,r3,8 ; Have we done enough?
1691 beq++ cflushlp ; Not yet...
1692
1693 sync
1694
1695 lis r6,0x0040 ; Pick 4MB line as our target
1696
1697cflushx: dcbf 0,r6 ; Flush line and invalidate
1698 addi r6,r6,128 ; Next line
1699 andis. r5,r6,0x0080 ; Have we done enough?
1700 beq++ cflushx ; Keep on flushing...
1701
1702 mr r3,r10 ; Copy current hid1
1703 rldimi r3,r2,54,9 ; Set force icbi match mode
1704
1705 li r6,0 ; Set start if ICBI range
1706 isync
1707 mtspr hid1,r3 ; Stick it
1708 mtspr hid1,r3 ; Stick it again
1709 isync
1710
1711cflicbi: icbi 0,r6 ; Kill I$
1712 addi r6,r6,128 ; Next line
1713 andis. r5,r6,1 ; Have we done them all?
1714 beq++ cflicbi ; Not yet...
1715
1716 lis r8,GUSModeReg ; Get GUS mode register address
1717
1718 sync
1719
1720 mtspr scomd,r11 ; Set that we do not want direct mode
1721 mtspr scomc,r8 ; Tell GUS we do not want direct mode
1722 mfspr r3,scomc ; Get back the status
1723 sync
1724 isync
4a249263
A
1725
1726 isync
1727 mtspr hid0,r9 ; Restore entry hid0
1728 mfspr r9,hid0 ; Yes, this is silly, keep it here
1729 mfspr r9,hid0 ; Yes, this is a duplicate, keep it here
1730 mfspr r9,hid0 ; Yes, this is a duplicate, keep it here
1731 mfspr r9,hid0 ; Yes, this is a duplicate, keep it here
1732 mfspr r9,hid0 ; Yes, this is a duplicate, keep it here
1733 mfspr r9,hid0 ; Yes, this is a duplicate, keep it here
1734 isync
1735
55e303ae
A
1736 isync
1737 mtspr hid1,r12 ; Restore entry hid1
1738 mtspr hid1,r12 ; Stick it again
1739 isync
1740
1741 sync
1742 mtspr hid4,r4 ; Restore entry hid4
1743 isync
1744
1745 sync
1746 mtmsr r7 ; Restore MSR to entry
1747 isync
1748 blr ; Return...
1749
1750
1751
1c79356b
A
1752/* Disables all caches
1753 *
1754 * void cacheDisable(void)
1755 *
1756 * Turns off all caches on the processor. They are not flushed.
1757 *
1758 */
1759
1760; Force a line boundry here
1761 .align 5
1762 .globl EXT(cacheDisable)
1763
1764LEXT(cacheDisable)
1765
1766 mfsprg r11,2 ; Get CPU specific features
1767 mtcrf 0x83,r11 ; Set feature flags
1768
1769 bf pfAltivecb,cdNoAlt ; No vectors...
1770
1771 dssall ; Stop streams
1772
1773cdNoAlt: sync
1774
55e303ae
A
1775 btlr pf64Bitb ; No way to disable a 64-bit machine...
1776
1c79356b
A
1777 mfspr r5,hid0 ; Get the hid
1778 rlwinm r5,r5,0,dce+1,ice-1 ; Clear the I- and D- cache enables
1779 mtspr hid0,r5 ; Turn off dem caches
1780 sync
1781
55e303ae
A
1782 rlwinm. r0,r11,0,pfL2b,pfL2b ; is there an L2?
1783 beq cdNoL2 ; Skip if no L2...
1784
1c79356b
A
1785 mfspr r5,l2cr ; Get the L2
1786 rlwinm r5,r5,0,l2e+1,31 ; Turn off enable bit
1787
1788 b cinlaa ; Branch to next line...
1789
1790 .align 5
1791cinlcc: mtspr l2cr,r5 ; Disable L2
1792 sync
1793 isync
1794 b cdNoL2 ; It is off, we are done...
1795
1796cinlaa: b cinlbb ; Branch to next...
1797
1798cinlbb: sync ; Finish memory stuff
1799 isync ; Stop speculation
1800 b cinlcc ; Jump back up and turn off cache...
1801
1802cdNoL2:
55e303ae 1803
1c79356b
A
1804 bf pfL3b,cdNoL3 ; Skip down if no L3...
1805
1806 mfspr r5,l3cr ; Get the L3
1807 rlwinm r5,r5,0,l3e+1,31 ; Turn off enable bit
1808 rlwinm r5,r5,0,l3clken+1,l3clken-1 ; Turn off cache enable bit
1809 mtspr l3cr,r5 ; Disable the caches
1810 sync
1811
1812cdNoL3:
1813 blr ; Leave...
1814
1815
1816/* Initialize processor thermal monitoring
1817 * void ml_thrm_init(void)
1818 *
483a1d10 1819 * Obsolete, deprecated and will be removed.
1c79356b
A
1820 */
1821
1822; Force a line boundry here
1823 .align 5
1824 .globl EXT(ml_thrm_init)
1825
1826LEXT(ml_thrm_init)
1c79356b
A
1827 blr
1828
1c79356b
A
1829/* Set thermal monitor bounds
1830 * void ml_thrm_set(unsigned int low, unsigned int high)
1831 *
483a1d10 1832 * Obsolete, deprecated and will be removed.
1c79356b
A
1833 */
1834
1835; Force a line boundry here
1836 .align 5
1837 .globl EXT(ml_thrm_set)
1838
1839LEXT(ml_thrm_set)
483a1d10 1840 blr
1c79356b
A
1841
1842/* Read processor temprature
1843 * unsigned int ml_read_temp(void)
1844 *
483a1d10 1845 * Obsolete, deprecated and will be removed.
1c79356b
A
1846 */
1847
1848; Force a line boundry here
1849 .align 5
1850 .globl EXT(ml_read_temp)
1851
1852LEXT(ml_read_temp)
483a1d10
A
1853 li r3,-1
1854 blr
1c79356b
A
1855
1856/* Throttle processor speed up or down
1857 * unsigned int ml_throttle(unsigned int step)
1858 *
1859 * Returns old speed and sets new. Both step and return are values from 0 to
1860 * 255 that define number of throttle steps, 0 being off and "ictcfim" is max * 2.
1861 *
483a1d10 1862 * Obsolete, deprecated and will be removed.
1c79356b
A
1863 */
1864
1865; Force a line boundry here
1866 .align 5
1867 .globl EXT(ml_throttle)
1868
1869LEXT(ml_throttle)
483a1d10
A
1870 li r3,0
1871 blr
1c79356b
A
1872
1873/*
1874** ml_get_timebase()
1875**
1876** Entry - R3 contains pointer to 64 bit structure.
1877**
1878** Exit - 64 bit structure filled in.
1879**
1880*/
1881; Force a line boundry here
1882 .align 5
1883 .globl EXT(ml_get_timebase)
1884
1885LEXT(ml_get_timebase)
1886
1887loop:
55e303ae
A
1888 mftbu r4
1889 mftb r5
1890 mftbu r6
1891 cmpw r6, r4
1892 bne- loop
1893
1894 stw r4, 0(r3)
1895 stw r5, 4(r3)
1896
1897 blr
1c79356b 1898
55e303ae
A
1899/*
1900 * unsigned int cpu_number(void)
1901 *
1902 * Returns the current cpu number.
1903 */
1904
1905 .align 5
1906 .globl EXT(cpu_number)
1907
1908LEXT(cpu_number)
91447636
A
1909 mfsprg r4,1 ; Get the current activation
1910 lwz r4,ACT_PER_PROC(r4) ; Get the per_proc block
55e303ae
A
1911 lhz r3,PP_CPU_NUMBER(r4) ; Get CPU number
1912 blr ; Return...
9bccf70c 1913
91447636
A
1914/*
1915 * processor_t current_processor(void)
1916 *
1917 * Returns the current processor.
1918 */
1919
1920 .align 5
1921 .globl EXT(current_processor)
1922
1923LEXT(current_processor)
1924 mfsprg r3,1 ; Get the current activation
1925 lwz r3,ACT_PER_PROC(r3) ; Get the per_proc block
1926 addi r3,r3,PP_PROCESSOR
1927 blr
1928
1929#if PROCESSOR_SIZE > PP_PROCESSOR_SIZE
1930#error processor overflows per_proc
1931#endif
d7e50217
A
1932
1933/*
91447636 1934 * ast_t *ast_pending(void)
55e303ae 1935 *
91447636
A
1936 * Returns the address of the pending AST mask for the current processor.
1937 */
1938
1939 .align 5
1940 .globl EXT(ast_pending)
1941
1942LEXT(ast_pending)
1943 mfsprg r3,1 ; Get the current activation
1944 lwz r3,ACT_PER_PROC(r3) ; Get the per_proc block
1945 addi r3,r3,PP_PENDING_AST
1946 blr ; Return...
1947
1948/*
1949 * void machine_set_current_thread(thread_t)
1950 *
1951 * Set the current thread
d7e50217 1952 */
55e303ae 1953 .align 5
91447636 1954 .globl EXT(machine_set_current_thread)
d7e50217 1955
91447636 1956LEXT(machine_set_current_thread)
55e303ae 1957
91447636
A
1958 mfsprg r4,1 ; Get spr1
1959 lwz r5,ACT_PER_PROC(r4) ; Get the PerProc from the previous active thread
1960 stw r5,ACT_PER_PROC(r3) ; Set the PerProc in the active thread
55e303ae
A
1961 mtsprg 1,r3 ; Set spr1 with the active thread
1962 blr ; Return...
1963
1964/*
55e303ae 1965 * thread_t current_thread(void)
91447636 1966 * thread_t current_act(void)
55e303ae
A
 1967 *
 1968 *
 1969 * Return the current thread for outside components.
 * current_act is an alias for current_thread; both simply return the
 * activation/thread pointer that the kernel keeps in SPRG1.
 1970 */
 1971 .align 5
55e303ae 1972 .globl EXT(current_thread)
91447636 1973 .globl EXT(current_act)
55e303ae 1974
55e303ae 1975LEXT(current_thread)
91447636 1976LEXT(current_act)
55e303ae
A
 1977
 1978 mfsprg r3,1 ; SPRG1 holds the current thread
 1979 blr
55e303ae
A
1980
;
; uint64_t mach_absolute_time(void)
;
; Returns the 64-bit timebase in r3 (upper) and r4 (lower).  Uses the
; standard mftbu/mftb/mftbu sequence: if the upper half changed while
; the lower half was being read, a carry rippled into the upper half
; mid-read, so the whole read is retried.
;
 1981 .align 5
 1982 .globl EXT(mach_absolute_time)
 1983LEXT(mach_absolute_time)
 19841: mftbu r3 ; Get upper timebase
 1985 mftb r4 ; Get lower timebase
 1986 mftbu r0 ; Re-read the upper half
 1987 cmpw r0,r3 ; Did the upper half tick during the read?
91447636 1988 bne-- 1b ; Yes, retry...
55e303ae 1989 blr
9bccf70c 1990
1c79356b
A
1991/*
 1992** ml_sense_nmi()
 1993**
** Stub: does nothing and returns immediately.  NMI sensing is not
** implemented here.
 1994*/
 1995; Force a line boundary here
 1996 .align 5
 1997 .globl EXT(ml_sense_nmi)
 1998
 1999LEXT(ml_sense_nmi)
 2000
 2001 blr ; Leave...
 2002
2002
d52fe63f 2003/*
91447636 2004** ml_set_processor_speed_powertune()
d52fe63f
A
2005**
** r3 = speed selector (expected 0 or 1).  The low bit of r3 selects
** either the pfPowerTune0 or pfPowerTune1 value from the per_proc
** block; that value is written to the PowerTune power control
** register over SCOM, and the PowerTune status register is then
** polled until the new speed takes effect.  HID0 dnap (dynamic nap)
** is disabled across the transition; it is re-enabled afterwards only
** if the selector's low bit is set AND the saved HID0 image (pfHID0)
** had dnap set.
** NOTE(review): the same low bit of r3 drives both the PowerTune
** index and the dnap restore state -- confirm callers pass only 0/1.
 2006*/
 2007; Force a line boundary here
 2008 .align 5
5353443c 2009 .globl EXT(ml_set_processor_speed_powertune)
d52fe63f 2010
5353443c 2011LEXT(ml_set_processor_speed_powertune)
483a1d10
A
 2012 mflr r0 ; Save the link register
 2013 stwu r1, -(FM_ALIGN(4*4)+FM_SIZE)(r1) ; Make some space on the stack
 2014 stw r28, FM_ARG0+0x00(r1) ; Save a register
 2015 stw r29, FM_ARG0+0x04(r1) ; Save a register
 2016 stw r30, FM_ARG0+0x08(r1) ; Save a register
 2017 stw r31, FM_ARG0+0x0C(r1) ; Save a register
 2018 stw r0, (FM_ALIGN(4*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
9bccf70c 2019
91447636
A
2020 mfsprg r31,1 ; Get the current activation
 2021 lwz r31,ACT_PER_PROC(r31) ; Get the per_proc block
483a1d10 2022
483a1d10
A
2023 rlwinm r28, r3, 31-dnap, dnap, dnap ; Shift the 1 bit to the dnap+32 bit
 2024 rlwinm r3, r3, 2, 29, 29 ; Shift the 1 to a 4 and mask (selector 0/1 -> offset 0/4)
 2025 addi r3, r3, pfPowerTune0 ; Add in the pfPowerTune0 offset
 2026 lwzx r29, r31, r3 ; Load the PowerTune number 0 or 1
 2027
; Build r28 = saved HID0 image with dnap cleared, plus the dnap bit
; re-inserted only if requested above AND present in the saved image.
 2028 sldi r28, r28, 32 ; Shift to the top half
 2029 ld r3, pfHID0(r31) ; Load the saved hid0 value
 2030 and r28, r28, r3 ; Save the dnap bit
 2031 lis r4, hi16(dnapm) ; Make a mask for the dnap bit
 2032 sldi r4, r4, 32 ; Shift to the top half
 2033 andc r3, r3, r4 ; Clear the dnap bit
 2034 or r28, r28, r3 ; Insert the dnap bit as needed for later
 2035
; The repeated reads below are a deliberate HID0 settling sequence --
; presumably required by the processor's HID0 access rules; keep them.
 2036 sync
 2037 mtspr hid0, r3 ; Turn off dnap in hid0
 2038 mfspr r3, hid0 ; Yes, this is silly, keep it here
 2039 mfspr r3, hid0 ; Yes, this is a duplicate, keep it here
 2040 mfspr r3, hid0 ; Yes, this is a duplicate, keep it here
 2041 mfspr r3, hid0 ; Yes, this is a duplicate, keep it here
 2042 mfspr r3, hid0 ; Yes, this is a duplicate, keep it here
 2043 mfspr r3, hid0 ; Yes, this is a duplicate, keep it here
 2044 isync ; Make sure it is set
 2045
 2046 lis r3, hi16(PowerTuneControlReg) ; Write zero to the PCR
 2047 ori r3, r3, lo16(PowerTuneControlReg)
 2048 li r4, 0
 2049 li r5, 0
 2050 bl _ml_scom_write
 2051
 2052 lis r3, hi16(PowerTuneControlReg) ; Write the PowerTune value to the PCR
 2053 ori r3, r3, lo16(PowerTuneControlReg)
 2054 li r4, 0
 2055 mr r5, r29
 2056 bl _ml_scom_write
 2057
; Poll the PowerTune status register until the reported speed matches
; the request and no speed change is still in progress.
 2058 rlwinm r29, r29, 13-6, 6, 7 ; Move to PSR speed location and isolate the requested speed
 2059spsPowerTuneLoop:
 2060 lis r3, hi16(PowerTuneStatusReg) ; Read the status from the PSR
 2061 ori r3, r3, lo16(PowerTuneStatusReg)
 2062 li r4, 0
 2063 bl _ml_scom_read
 2064 srdi r5, r5, 32 ; Status is in the upper word; move it down
 2065 rlwinm r0, r5, 0, 6, 7 ; Isolate the current speed
 2066 rlwimi r0, r5, 0, 2, 2 ; Copy in the change in progress bit
 2067 cmpw r0, r29 ; Compare the requested and current speeds
 2068 beq spsPowerTuneDone
 2069 rlwinm. r0, r5, 0, 3, 3 ; Test PSR bit 3; keep polling while it is clear
 2070 beq spsPowerTuneLoop
 2071
 2072spsPowerTuneDone:
 2073 sync
 2074 mtspr hid0, r28 ; Turn on dnap in hid0 if needed
 2075 mfspr r28, hid0 ; Yes, this is silly, keep it here
 2076 mfspr r28, hid0 ; Yes, this is a duplicate, keep it here
 2077 mfspr r28, hid0 ; Yes, this is a duplicate, keep it here
 2078 mfspr r28, hid0 ; Yes, this is a duplicate, keep it here
 2079 mfspr r28, hid0 ; Yes, this is a duplicate, keep it here
 2080 mfspr r28, hid0 ; Yes, this is a duplicate, keep it here
 2081 isync ; Make sure it is set
 2082
483a1d10
A
2083 lwz r0, (FM_ALIGN(4*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Get the return
 2084 lwz r28, FM_ARG0+0x00(r1) ; Restore a register
 2085 lwz r29, FM_ARG0+0x04(r1) ; Restore a register
 2086 lwz r30, FM_ARG0+0x08(r1) ; Restore a register
 2087 lwz r31, FM_ARG0+0x0C(r1) ; Restore a register
 2088 lwz r1, FM_BACKPTR(r1) ; Pop the stack
 2089 mtlr r0
d52fe63f 2090 blr
d12e1678 2091
5353443c
A
2092/*
 2093** ml_set_processor_speed_dpll()
 2094**
** r3 = PLL select (0 = high speed, nonzero = low speed).  Before
** dropping to low speed, the BTIC bit is cleared in HID0; after
** returning to high speed, the saved HID0 image (pfHID0) is written
** back, which restores BTIC.  The new HID1 value is also saved in
** pfHID1 so the per_proc image stays in sync with the hardware.
 2095*/
 2096; Force a line boundary here
 2097 .align 5
 2098 .globl EXT(ml_set_processor_speed_dpll)
 2099
 2100LEXT(ml_set_processor_speed_dpll)
91447636
A
2101 mfsprg r5,1 ; Get the current activation
 2102 lwz r5,ACT_PER_PROC(r5) ; Get the per_proc block
5353443c
A
2103
 2104 cmplwi r3, 0 ; Turn off BTIC before low speed
 2105 beq spsDPLL1
 2106 mfspr r4, hid0 ; Get the current hid0 value
 2107 rlwinm r4, r4, 0, btic+1, btic-1 ; Clear the BTIC bit
 2108 sync
 2109 mtspr hid0, r4 ; Set the new hid0 value
 2110 isync
 2111 sync
 2112
 2113spsDPLL1:
 2114 mfspr r4, hid1 ; Get the current PLL settings
 2115 rlwimi r4, r3, 31-hid1ps, hid1ps, hid1ps ; Copy the PLL Select bit
 2116 stw r4, pfHID1(r5) ; Save the new hid1 value
 2117 mtspr hid1, r4 ; Select desired PLL
 2118
 2119 cmplwi r3, 0 ; Restore BTIC after high speed
 2120 bne spsDPLL2
 2121 lwz r4, pfHID0(r5) ; Load the hid0 value
 2122 sync
 2123 mtspr hid0, r4 ; Set the hid0 value
 2124 isync
 2125 sync
 2126spsDPLL2:
 2127 blr
2128
2129
2130/*
3a60a9f5
A
2131** ml_set_processor_speed_dfs(divideby)
 2132** divideby == 0 then divide by 1 (full speed)
 2133** divideby == 1 then divide by 2 (half speed)
 2134** divideby == 2 then divide by 4 (quarter speed)
 2135** divideby == 3 then divide by 4 (quarter speed) - preferred
5353443c
A
2136**
** Inserts the two DFS divider bits into HID1 and saves the new HID1
** image in the per_proc block (pfHID1).
 2137*/
 2138; Force a line boundary here
 2139 .align 5
 2140 .globl EXT(ml_set_processor_speed_dfs)
 2141
 2142LEXT(ml_set_processor_speed_dfs)
5353443c 2143
3a60a9f5
A
2144 mfspr r4,hid1 ; Get the current HID1
; NOTE(review): unlike the other routines in this file, this reads the
; per_proc pointer directly from SPRG0 rather than via SPRG1 and
; ACT_PER_PROC -- confirm SPRG0 still holds the per_proc block here.
 2145 mfsprg r5,0 ; Get the per_proc_info
 2146 rlwimi r4,r3,31-hid1dfs1,hid1dfs0,hid1dfs1 ; Stick the new divider bits in
 2147 stw r4,pfHID1(r5) ; Save the new hid1 value
5353443c 2148 sync
3a60a9f5 2149 mtspr hid1,r4 ; Set the new HID1
5353443c
A
2150 sync
 2151 isync
 2152 blr
2153
2154
d12e1678
A
2155/*
 2156** ml_set_processor_voltage()
 2157**
** r3 = voltage mode bit; its low bit is inserted into the HID2 vmin
** field.  Only acts if the per_proc power modes (pfPowerModes) say
** DPLL Vmin is supported; otherwise returns without touching HID2.
 2158*/
 2159; Force a line boundary here
 2160 .align 5
 2161 .globl EXT(ml_set_processor_voltage)
 2162
 2163LEXT(ml_set_processor_voltage)
91447636
A
2164 mfsprg r5,1 ; Get the current activation
 2165 lwz r5,ACT_PER_PROC(r5) ; Get the per_proc block
4a249263
A
2166
 2167 lwz r6, pfPowerModes(r5) ; Get the supported power modes
 2168
 2169 rlwinm. r0, r6, 0, pmDPLLVminb, pmDPLLVminb ; Is DPLL Vmin supported
 2170 beq spvDone ; No, do nothing...
 2171
 2172 mfspr r4, hid2 ; Get HID2 value
 2173 rlwimi r4, r3, 31-hid2vmin, hid2vmin, hid2vmin ; Insert the voltage mode bit
 2174 mtspr hid2, r4 ; Set the voltage mode
 2175 sync ; Make sure it is done
 2176
 2177spvDone:
d12e1678 2178 blr
483a1d10
A
2179
2180
2181;
 2182; unsigned int ml_scom_write(unsigned int reg, unsigned long long data)
 2183; 64-bit machines only
 2184; returns status
;
; r3 = SCOM register id, r4:r5 = 64-bit data (high:low, merged into a
; single 64-bit value in r5).  Writes the data via the SCOMD/SCOMC
; SPR pair and returns the SCOMC status in r3.
 2185;
 2186
 2187 .align 5
 2188 .globl EXT(ml_scom_write)
 2189
 2190LEXT(ml_scom_write)
 2191
 2192 rldicr r3,r3,8,47 ; Position the register id (reg << 8)
 2193 rldimi r5,r4,32,0 ; Merge the high part of data
 2194 sync ; Clean up everything
 2195
 2196 mtspr scomd,r5 ; Stick in the data
 2197 mtspr scomc,r3 ; Set write to register
 2198 sync
 2199 isync
 2200
 2201 mfspr r3,scomc ; Read back status
 2202 blr ; leave....
2203
2204;
 2205; unsigned int ml_scom_read(unsigned int reg, unsigned long long *data)
 2206; 64-bit machines only
 2207; returns status
 2208; ASM Callers: data (r4) can be zero and the 64 bit data will be returned in r5
;
; r3 = SCOM register id.  Reads via the SCOMC/SCOMD SPR pair; returns
; the SCOMC status in r3 and the data in r5 (and stored through r4
; when non-null).  On parts flagged with pfSCOMFixUp in the feature
; flags (SPRG2), the data is shifted left one bit to correct it.
 2209;
 2210
 2211 .align 5
 2212 .globl EXT(ml_scom_read)
 2213
 2214LEXT(ml_scom_read)
 2215
 2216 mfsprg r0,2 ; Get the feature flags
 2217 rldicr r3,r3,8,47 ; Position the register id (reg << 8)
 2218 rlwinm r0,r0,pfSCOMFixUpb+1,31,31 ; Set shift if we need a fix me up
 2219
 2220 ori r3,r3,0x8000 ; Set to read data
 2221 sync
 2222
 2223 mtspr scomc,r3 ; Request the register
 2224 mfspr r5,scomd ; Get the register contents
 2225 mfspr r3,scomc ; Get back the status
 2226 sync
 2227 isync
 2228
 2229 sld r5,r5,r0 ; Fix up if needed (shift by 0 or 1)
 2230
 2231 cmplwi r4, 0 ; If data pointer is null, just return
 2232 beqlr ; the received data in r5
 2233 std r5,0(r4) ; Pass back the received data
 2234 blr ; Leave...
a3d08fcd
A
2235
2236;
 2237; Calculates the hdec to dec ratio
;
; Measures how many hypervisor-decrementer (hdec) ticks elapse while
; 10000 timebase ticks go by, with external interrupts disabled, and
; returns that count in r3.  Clobbers r2, r8-r12.
 2238;
 2239
 2240 .align 5
 2241 .globl EXT(ml_hdec_ratio)
 2242
 2243LEXT(ml_hdec_ratio)
 2244
 2245 li r0,0 ; Clear the EE bit (and everything else for that matter)
 2246 mfmsr r11 ; Get the MSR
 2247 mtmsrd r0,1 ; Clear EE (the L=1 form alters only EE/RI; do not care about RI)
 2248 rlwinm r11,r11,0,MSR_EE_BIT,MSR_EE_BIT ; Isolate just the EE bit
 2249 mfmsr r10 ; Refresh our view of the MSR (VMX/FP may have changed)
 2250 or r12,r10,r11 ; Turn on EE if on before we turned it off
 2251
 2252 mftb r9 ; Get time now
 2253 mfspr r2,hdec ; Save hdec
 2254
; Spin until 10000 timebase ticks have elapsed.
 2255mhrcalc: mftb r8 ; Get time now
 2256 sub r8,r8,r9 ; How many ticks?
 2257 cmplwi r8,10000 ; 10000 yet?
 2258 blt mhrcalc ; Nope...
 2259
 2260 mfspr r9,hdec ; Get hdec now
 2261 sub r3,r2,r9 ; How many hdec ticks elapsed?
 2262 mtmsrd r12,1 ; Flip EE on if needed
 2263 blr ; Leave...
3a60a9f5
A
2264
2265
2266;
 2267; int setPop(time)
 2268;
 2269; Calculates the number of ticks to the supplied event and
 2270; sets the decrementer. Never set the time for less than the
 2271; minimum, which is 10, nor more than maxDec, which is usually 0x7FFFFFFF
 2272; and never more than that but can be set by root.
;
; r3:r4 = 64-bit target time in timebase ticks (upper:lower).  The
; duration is computed branch-free: it is clamped to 0 if below kMin
; and to (maxDec - kMin) if too large, then biased so the final
; decrementer value lands in [kMin, maxDec].  The timebase is sampled
; with the mftbu/mftb/mftbu pattern; if the upper half ticked during
; sampling, the whole calculation is redone.
 2273;
 2274;
 2275

 2276 .align 7
 2277 .globl EXT(setPop)
 2278
 2279#define kMin 10
 2280
 2281LEXT(setPop)
 2282
 2283spOver: mftbu r8 ; Get upper time
 2284 addic r2,r4,-kMin ; Subtract minimum from target
 2285 mftb r9 ; Get lower
 2286 addme r11,r3 ; Do you have any bits I could borrow?
 2287 mftbu r10 ; Get upper again
 2288 subfe r0,r0,r0 ; Get -1 if we went negative 0 otherwise
 2289 subc r7,r2,r9 ; Subtract bottom and get carry
 2290 cmplw r8,r10 ; Did timebase upper tick? (cr0 consumed by bne-- below)
 2291 subfe r6,r8,r11 ; Get the upper difference accounting for borrow
 2292 lwz r12,maxDec(0) ; Get the maximum decrementer size
 2293 addme r0,r0 ; Get -1 or -2 if anything negative, 0 otherwise
 2294 addic r2,r6,-1 ; Set carry if diff < 2**32
 2295 srawi r0,r0,1 ; Make all foxes (0 stays 0, any negative becomes -1)
 2296 subi r10,r12,kMin ; Adjust maximum for minimum adjust
 2297 andc r7,r7,r0 ; Pin time at 0 if under minimum
 2298 subfe r2,r2,r2 ; 0 if diff > 2**32, -1 otherwise
 2299 sub r7,r7,r10 ; Negative if duration is less than (max - min)
13fec989 2300 or r2,r2,r0 ; If the duration is negative, it is not too big
3a60a9f5
A
2301 srawi r0,r7,31 ; -1 if duration is too small
 2302 and r7,r7,r2 ; Clear duration if high part too big
 2303 and r7,r7,r0 ; Clear duration if low part too big
 2304 bne-- spOver ; Timebase upper ticked during sampling -- resample...
 2305 add r3,r7,r12 ; Add back the max for total
 2306 mtdec r3 ; Set the decrementer
 2307 blr ; Leave...
2308
2309