1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 #include <ppc/asm.h>
23 #include <ppc/proc_reg.h>
24 #include <cpus.h>
25 #include <assym.s>
26 #include <debug.h>
27 #include <mach/ppc/vm_param.h>
28 #include <ppc/exception.h>
29
30 /* PCI config cycle probing
31 *
32 * boolean_t ml_probe_read(vm_offset_t paddr, unsigned int *val)
33 *
34 * Read the memory location at physical address paddr.
35 * This is a part of a device probe, so there is a good chance we will
36 * have a machine check here. So we have to be able to handle that.
37 * We assume that machine checks are enabled both in MSR and HIDs
38 */
39
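/* Usage sketch (illustrative only): a minimal, hypothetical caller that probes a
 * device register with ml_probe_read() and only trusts the value if the probe
 * succeeded. PROBE_ADDR and probe_device() are made-up names for illustration.
 *
 *	#define PROBE_ADDR	((vm_offset_t)0x80000000)
 *
 *	static boolean_t probe_device(unsigned int *regval)
 *	{
 *		if (!ml_probe_read(PROBE_ADDR, regval))
 *			return FALSE;		// probe failed (machine check or bad offset)
 *		return TRUE;			// *regval now holds the device register
 *	}
 */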
40 ; Force a line boundary here
41 .align 5
42 .globl EXT(ml_probe_read)
43
44 LEXT(ml_probe_read)
45
46 mfsprg r9,2 ; Get feature flags
47 mfmsr r0 ; Save the current MSR
48 neg r10,r3 ; Number of bytes to end of page
49 rlwinm r2,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear interruptions
50 rlwinm. r10,r10,0,20,31 ; Clear excess junk and test for page boundary
51 mr r12,r3 ; Save the load address
52 mtcrf 0x04,r9 ; Set the features
53 cmplwi cr1,r10,4 ; At least 4 bytes left in page?
54 rlwinm r2,r2,0,MSR_DR_BIT+1,MSR_IR_BIT-1 ; Clear translation
55 beq- mprdoit ; We are right on the boundary...
56 li r3,0
57 bltlr- cr1 ; No, just return failure...
58
59 mprdoit:
60
61 bt pfNoMSRirb,mprNoMSR ; No MSR...
62
63 mtmsr r2 ; Translation and all off
64 isync ; Toss prefetch
65 b mprNoMSRx
66
67 mprNoMSR:
68 mr r5,r0
69 li r0,loadMSR ; Get the MSR setter SC
70 mr r3,r2 ; Get new MSR
71 sc ; Set it
72 mr r0,r5
73 li r3,0
74 mprNoMSRx:
75
76 mfspr r6, hid0 ; Get a copy of hid0
77
78
79 ;
80 ; We need to ensure that there is no more than 1 BAT register that
81 ; can get a hit. There could be repercussions beyond the ken
82 ; of mortal man. It is best not to tempt fate.
83 ;
84
85 ; Note: we will reload these from the shadow BATs later
86
87 li r10,0 ; Clear a register
88
89 sync ; Make sure all is well
90
91 mtdbatu 1,r10 ; Invalidate DBAT 1
92 mtdbatu 2,r10 ; Invalidate DBAT 2
93 mtdbatu 3,r10 ; Invalidate DBAT 3
94
95 rlwinm r10,r12,0,0,14 ; Round down to a 128k boundary
96 ori r11,r10,0x32 ; Set uncached, coherent, R/W
97 ori r10,r10,2 ; Make the upper half (128k, valid supervisor)
98 mtdbatl 0,r11 ; Set lower BAT first
99 mtdbatu 0,r10 ; Now the upper
100 sync ; Just make sure
101
102 dcbf 0,r12 ; Make sure we kill the cache to avoid paradoxes
103 sync
104
105 ori r11,r2,lo16(MASK(MSR_DR)) ; Turn on data translation
106 mtmsr r11 ; Do it for real
107 isync ; Make sure of it
108
109 eieio ; Make sure of all previous accesses
110 sync ; Make sure it is all caught up
111
112 lwz r11,0(r12) ; Get it and maybe machine check here
113
114 eieio ; Make sure of ordering again
115 sync ; Get caught up yet again
116 isync ; Do not go further till we are here
117
118 mtmsr r2 ; Turn translation back off
119 isync
120
121 mtspr hid0, r6 ; Restore HID0
122 isync
123
124 lis r10,hi16(EXT(shadow_BAT)+shdDBAT) ; Get shadow address
125 ori r10,r10,lo16(EXT(shadow_BAT)+shdDBAT) ; Get shadow address
126
127 lwz r5,0(r10) ; Pick up DBAT 0 high
128 lwz r6,4(r10) ; Pick up DBAT 0 low
129 lwz r7,8(r10) ; Pick up DBAT 1 high
130 lwz r8,16(r10) ; Pick up DBAT 2 high
131 lwz r9,24(r10) ; Pick up DBAT 3 high
132
133 mtdbatu 0,r5 ; Restore DBAT 0 high
134 mtdbatl 0,r6 ; Restore DBAT 0 low
135 mtdbatu 1,r7 ; Restore DBAT 1 high
136 mtdbatu 2,r8 ; Restore DBAT 2 high
137 mtdbatu 3,r9 ; Restore DBAT 3 high
138 sync
139
140 li r3,1 ; We made it
141
142 mtmsr r0 ; Restore translation and exceptions
143 isync ; Toss speculations
144
145 stw r11,0(r4) ; Save the loaded value
146 blr ; Return...
147
148 ; Force a line boundary here. This means we will be able to check addresses better
149 .align 5
150 .globl EXT(ml_probe_read_mck)
151 LEXT(ml_probe_read_mck)
152
153 /* Read physical address
154 *
155 * unsigned int ml_phys_read_byte(vm_offset_t paddr)
156 *
157 * Read the byte at physical address paddr. Memory should not be cache inhibited.
158 */
159
160 ; Force a line boundary here
161 .align 5
162 .globl EXT(ml_phys_read_byte)
163
164 LEXT(ml_phys_read_byte)
165
166 mfmsr r10 ; Save the current MSR
167 rlwinm r4,r10,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear interruptions
168 rlwinm r4,r4,0,MSR_DR_BIT+1,MSR_DR_BIT-1 ; Clear translation
169
170 mtmsr r4 ; Translation and all off
171 isync ; Toss prefetch
172
173 lbz r3,0(r3) ; Get the byte
174 sync
175
176 mtmsr r10 ; Restore translation and rupts
177 isync
178 blr
179
180 /* Read physical address
181 *
182 * unsigned int ml_phys_read(vm_offset_t paddr)
183 *
184 * Read the word at physical address paddr. Memory should not be cache inhibited.
185 */
186
187 ; Force a line boundary here
188 .align 5
189 .globl EXT(ml_phys_read)
190
191 LEXT(ml_phys_read)
192
193 mfmsr r0 ; Save the current MSR
194 rlwinm r4,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear interruptions
195 rlwinm r4,r4,0,MSR_DR_BIT+1,MSR_DR_BIT-1 ; Clear translation
196
197 mtmsr r4 ; Translation and all off
198 isync ; Toss prefetch
199
200 lwz r3,0(r3) ; Get the word
201 sync
202
203 mtmsr r0 ; Restore translation and rupts
204 isync
205 blr
206
207 /* Write physical address byte
208 *
209 * void ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
210 *
211 * Write the byte at physical address paddr. Memory should not be cache inhibited.
212 */
213
214 ; Force a line boundary here
215 .align 5
216 .globl EXT(ml_phys_write_byte)
217
218 LEXT(ml_phys_write_byte)
219
220 mfmsr r0 ; Save the current MSR
221 rlwinm r5,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear interruptions
222 rlwinm r5,r5,0,MSR_DR_BIT+1,MSR_DR_BIT-1 ; Clear translation
223
224 mtmsr r5 ; Translation and all off
225 isync ; Toss prefetch
226
227 stb r4,0(r3) ; Set the byte
228 sync
229
230 mtmsr r0 ; Restore translation and rupts
231 isync
232 blr
233
234 /* Write physical address
235 *
236 * void ml_phys_write(vm_offset_t paddr, unsigned int data)
237 *
238 * Write the word at physical address paddr. Memory should not be cache inhibited.
239 */
240
241 ; Force a line boundary here
242 .align 5
243 .globl EXT(ml_phys_write)
244
245 LEXT(ml_phys_write)
246
247 mfmsr r0 ; Save the current MSR
248 rlwinm r5,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear interruptions
249 rlwinm r5,r5,0,MSR_DR_BIT+1,MSR_DR_BIT-1 ; Clear translation
250
251 mtmsr r5 ; Translation and all off
252 isync ; Toss prefetch
253
254 stw r4,0(r3) ; Set the word
255 sync
256
257 mtmsr r0 ; Restore translation and rupts
258 isync
259 blr
260
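/* Usage sketch (illustrative only) for the four physical-access helpers above.
 * The addresses are made-up examples; the memory is assumed to be ordinary,
 * cacheable RAM, as the comments require.
 *
 *	vm_offset_t src = 0x1000, dst = 0x2000;		// hypothetical physical addresses
 *	unsigned int word = ml_phys_read(src);		// read a word
 *	unsigned int byte = ml_phys_read_byte(src + 4);	// read a byte
 *	ml_phys_write(dst, word);			// write the word elsewhere
 *	ml_phys_write_byte(dst + 4, byte);		// write the byte
 */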
261
262 /* Set interrupts enabled or disabled
263 *
264 * boolean_t set_interrupts_enabled(boolean_t enable)
265 *
266 * Set EE bit to "enable" and return old value as boolean
267 */
268
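/* Usage sketch (illustrative only): the usual save/disable/restore pattern built
 * on the returned previous state.
 *
 *	boolean_t prev = ml_set_interrupts_enabled(FALSE);	// disable, remember old EE
 *	// ... short critical section ...
 *	(void) ml_set_interrupts_enabled(prev);			// put the old state back
 */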
269 ; Force a line boundary here
270 .align 5
271 .globl EXT(ml_set_interrupts_enabled)
272
273 LEXT(ml_set_interrupts_enabled)
274
275 mfsprg r7,0
276 lwz r4,PP_INTS_ENABLED(r7)
277 mr. r4,r4
278 beq- EXT(fake_set_interrupts_enabled)
279 mfmsr r5 ; Get the current MSR
280 mr r4,r3 ; Save the old value
281 rlwinm r3,r5,17,31,31 ; Set return value
282 rlwimi r5,r4,15,16,16 ; Insert new EE bit
283 andi. r8,r5,lo16(MASK(MSR_EE)) ; Interruptions
284 bne CheckPreemption
285 NoPreemption:
286 mtmsr r5 ; Slam enablement
287 blr
288
289 CheckPreemption:
290 lwz r8,PP_NEED_AST(r7)
291 lwz r7,PP_CPU_DATA(r7)
292 li r6,AST_URGENT
293 lwz r8,0(r8)
294 lwz r7,CPU_PREEMPTION_LEVEL(r7)
295 lis r0,HIGH_ADDR(DoPreemptCall)
296 and. r8,r8,r6
297 ori r0,r0,LOW_ADDR(DoPreemptCall)
298 beq+ NoPreemption
299 cmpi cr0, r7, 0
300 bne+ NoPreemption
301 sc
302 mtmsr r5
303 blr
304
305
306 /* Emulate a decrementer exception
307 *
308 * void machine_clock_assist(void)
309 *
310 */
311
312 ; Force a line boundary here
313 .align 5
314 .globl EXT(machine_clock_assist)
315
316 LEXT(machine_clock_assist)
317
318 mfsprg r7,0
319 lwz r4,PP_INTS_ENABLED(r7)
320 mr. r4,r4
321 beq- EXT(CreateFakeDEC)
322 blr
323
324 /* Set machine into idle power-saving mode.
325 *
326 * void machine_idle_ppc(void)
327 *
328 * We will use the PPC NAP or DOZE for this.
329 * This call always returns. Must be called with spllo (i.e., interruptions
330 * enabled).
331 *
332 */
333
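/* Usage sketch (illustrative only): a hypothetical idle loop. Interruptions must
 * already be enabled (spllo) on entry, and the call returns once an interrupt
 * wakes the processor from nap/doze. nothing_to_run() is a made-up predicate.
 *
 *	while (nothing_to_run())
 *		machine_idle_ppc();		// nap or doze until an interrupt arrives
 */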
334
335 ; Force a line boundary here
336 .align 5
337 .globl EXT(machine_idle_ppc)
338
339 LEXT(machine_idle_ppc)
340
341 mfmsr r3 ; Get the current MSR
342 rlwinm r5,r3,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions
343 mtmsr r5 ; Hold up interruptions for now
344 mfsprg r12,0 ; Get the per_proc_info
345 mfspr r6,hid0 ; Get the current power-saving mode
346 mfsprg r11,2 ; Get CPU specific features
347 rlwinm r6,r6,0,sleep+1,doze-1 ; Clear all possible power-saving modes (not DPM though)
348 mtcrf 0xC7,r11 ; Get the facility flags
349
350 lis r4,hi16(napm) ; Assume we can nap
351 bt pfWillNapb,yesnap ; Yeah, nap is ok...
352
353 lis r4,hi16(dozem) ; Assume we can doze
354 bt pfCanDozeb,yesnap ; We can sleep or doze on this machine...
355
356 ori r3,r3,lo16(MASK(MSR_EE)) ; Flip on EE
357 mtmsr r3 ; Turn interruptions back on
358 blr ; Leave...
359
360 yesnap: mftbu r9 ; Get the upper timebase
361 mftb r7 ; Get the lower timebase
362 mftbu r8 ; Get the upper one again
363 cmplw r9,r8 ; Did the top tick?
364 bne- yesnap ; Yeah, need to get it again...
365 stw r8,napStamp(r12) ; Set high order time stamp
366 stw r7,napStamp+4(r12) ; Set low order nap stamp
367
368
369 ;
370 ; We have to open up interruptions here because book 4 says that we should
371 ; turn on only the POW bit and that we should have interrupts enabled
372 ; The interrupt handler will detect that nap or doze is set if an interrupt
373 ; is taken and set everything up to return directly to machine_idle_ret.
374 ; So, make sure everything we need there is already set up...
375 ;
376 ori r7,r5,lo16(MASK(MSR_EE)) ; Flip on EE
377 or r6,r6,r4 ; Set nap or doze
378 oris r5,r7,hi16(MASK(MSR_POW)) ; Turn on power management in next MSR
379 mtspr hid0,r6 ; Set up the HID for nap/doze
380 isync ; Make sure it is set
381 mtmsr r7 ; Enable for interrupts
382 rlwinm. r11,r11,0,pfAltivecb,pfAltivecb ; Do we have altivec?
383 beq- minovec ; No...
384 dssall ; Stop the streams before we nap/doze
385
386 minovec: sync ; Make sure queues are clear
387 mtmsr r5 ; Nap or doze
388 isync ; Make sure this takes before we proceed
389 b minovec ; loop if POW does not take
390 ;
391 ; Note that the interrupt handler will turn off the nap/doze bits in the hid.
392 ; Also remember that the interrupt handler will force return to here whenever
393 ; the nap/doze bits are set.
394 ;
395 .globl EXT(machine_idle_ret)
396 LEXT(machine_idle_ret)
397 mtmsr r7 ; Make sure the MSR is what we want
398 isync ; In case we turn on translation
399
400 blr ; Return...
401
402 /* Put machine to sleep.
403 * This call never returns. We always exit sleep via a soft reset.
404 * All external interruptions must be drained at this point and disabled.
405 *
406 * void ml_ppc_sleep(void)
407 *
408 * We will use the PPC SLEEP for this.
409 *
410 * There is one bit of hackery in here: we need to enable for
411 * interruptions when we go to sleep and there may be a pending
412 * decrementer rupt. So we make the decrementer 0x7FFFFFFF and enable for
413 * interruptions. The decrementer rupt vector recognizes this and returns
414 * directly back here.
415 *
416 */
417
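/* Usage sketch (illustrative only): a hypothetical platform sleep path. The
 * caller must first drain and disable all external interruption sources;
 * platform_quiesce_interrupts() is a made-up placeholder for that work.
 *
 *	platform_quiesce_interrupts();		// drain and disable external interrupts
 *	ml_ppc_sleep();				// never returns; wake is via soft reset
 */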
418 ; Force a line boundary here
419 .align 5
420 .globl EXT(ml_ppc_sleep)
421
422 LEXT(ml_ppc_sleep)
423
424 #if 0
425 mfmsr r5 ; Hack to spin instead of sleep
426 rlwinm r5,r5,0,MSR_DR_BIT+1,MSR_IR_BIT-1 ; Turn off translation
427 rlwinm r5,r5,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions
428 mtmsr r5 ; No talking
429 isync
430
431 ; No interrupts allowed after we get the savearea
432
433 mfsprg r6,0 ; Get the per_proc
434 mfsprg r7,1 ; Get the pending savearea
435 stw r7,savedSave(r6) ; Save the savearea for when we wake up
436
437 deadsleep: addi r3,r3,1 ; Make analyzer happy
438 addi r3,r3,1
439 addi r3,r3,1
440 b deadsleep ; Die the death of 1000 joys...
441 #endif
442
443 mfsprg r12,0 ; Get the per_proc_info
444 mfspr r4,hid0 ; Get the current power-saving mode
445 eqv r10,r10,r10 ; Get all foxes
446 mfsprg r11,2 ; Get CPU specific features
447 mfmsr r5 ; Get the current MSR
448 rlwinm r10,r10,0,1,31 ; Make 0x7FFFFFFF
449 rlwinm r4,r4,0,sleep+1,doze-1 ; Clear all possible power-saving modes (not DPM though)
450 mtdec r10 ; Load decrementer with 0x7FFFFFFF
451 isync ; and make sure,
452 mfdec r9 ; really sure, it gets there
453
454 mtcrf 0x07,r11 ; Get the cache flags, etc
455
456 oris r4,r4,hi16(sleepm) ; Set sleep
457 rlwinm r5,r5,0,MSR_DR_BIT+1,MSR_IR_BIT-1 ; Turn off translation
458 ;
459 ; Note that we need translation off before we set the HID to sleep. Otherwise
460 ; we will ignore any PTE misses that occur and cause an infinite loop.
461 ;
462 bt pfNoMSRirb,mpsNoMSR ; No MSR...
463
464 mtmsr r5 ; Translation off
465 isync ; Toss prefetch
466 b mpsNoMSRx
467
468 mpsNoMSR:
469 li r0,loadMSR ; Get the MSR setter SC
470 mr r3,r5 ; Get new MSR
471 sc ; Set it
472 mpsNoMSRx:
473
474 ori r3,r5,lo16(MASK(MSR_EE)) ; Flip on EE
475 sync
476 mtspr hid0,r4 ; Set up the HID to sleep
477
478 mtmsr r3 ; Enable for interrupts to drain decrementer
479
480 add r6,r4,r5 ; Just waste time
481 add r6,r6,r4 ; A bit more
482 add r6,r6,r5 ; A bit more
483
484 mtmsr r5 ; Interruptions back off
485 isync ; Toss prefetch
486
487 mfsprg r7,1 ; Get the pending savearea
488 stw r7,savedSave(r12) ; Save the savearea for when we wake up
489
490 ;
491 ; We are here with translation off, interrupts off, all possible
492 ; interruptions drained off, and a decrementer that will not pop.
493 ;
494
495 bl EXT(cacheInit) ; Clear out the caches. This will leave them on
496 bl EXT(cacheDisable) ; Turn off all caches
497
498 mfmsr r5 ; Get the current MSR
499 oris r5,r5,hi16(MASK(MSR_POW)) ; Turn on power management in next MSR
500 ; Leave EE off because power goes off shortly
501
502 slSleepNow: sync ; Sync it all up
503 mtmsr r5 ; Do sleep with interruptions enabled
504 isync ; Take a pill
505 b slSleepNow ; Go back to sleep if we wake up...
506
507
508
509 /* Initialize all caches including the TLBs
510 *
511 * void cacheInit(void)
512 *
513 * This is used to force the caches to an initial clean state. First, we
514 * check if the cache is on; if so, we flush its contents to memory.
515 * Then we invalidate the L1. Next, we configure and invalidate the L2 and L3.
516 * Finally, we turn all of the caches back on.
517 *
518 * Note that if translation is not disabled when this is called, the TLB will not
519 * be completely clear after return.
520 *
521 */
522
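/* Usage sketch (illustrative only): the ordering the sleep path above uses, with
 * translation and interruptions already off.
 *
 *	cacheInit();		// flush and reinitialize all caches (leaves them enabled)
 *	cacheDisable();		// then turn every cache off before power is removed
 */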
523 ; Force a line boundary here
524 .align 5
525 .globl EXT(cacheInit)
526
527 LEXT(cacheInit)
528
529 mfsprg r12,0 ; Get the per_proc_info
530 mfspr r9,hid0 ; Get the current power-saving mode
531
532 mfsprg r11,2 ; Get CPU specific features
533 mfmsr r7 ; Get the current MSR
534 rlwinm r4,r9,0,dpm+1,doze-1 ; Clear all possible power-saving modes (also disable DPM)
535 rlwimi r11,r11,pfLClckb+1,31,31 ; Move pfLClck to another position (to keep from using non-volatile CRs)
536 rlwinm r5,r7,0,MSR_DR_BIT+1,MSR_IR_BIT-1 ; Turn off translation
537 rlwinm r5,r5,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions
538 mtcrf 0x87,r11 ; Get the feature flags
539 mtspr hid0,r4 ; Set up the HID
540
541 bt pfNoMSRirb,ciNoMSR ; No MSR...
542
543 mtmsr r5 ; Translation and all off
544 isync ; Toss prefetch
545 b ciNoMSRx
546
547 ciNoMSR:
548 li r0,loadMSR ; Get the MSR setter SC
549 mr r3,r5 ; Get new MSR
550 sc ; Set it
551 ciNoMSRx:
552
553 bf pfAltivecb,cinoDSS ; No Altivec here...
554
555 dssall ; Stop streams
556 sync
557
558 cinoDSS: lis r5,hi16(EXT(tlb_system_lock)) ; Get the TLBIE lock
559 li r0,128 ; Get number of TLB entries
560 ori r5,r5,lo16(EXT(tlb_system_lock)) ; Grab up the bottom part
561
562 li r6,0 ; Start at 0
563
564 citlbhang: lwarx r2,0,r5 ; Get the TLBIE lock
565 mr. r2,r2 ; Is it locked?
566 bne- citlbhang ; It is locked, go wait...
567 stwcx. r0,0,r5 ; Try to get it
568 bne- citlbhang ; We was beat...
569
570 mtctr r0 ; Set the CTR
571
572 cipurgeTLB: tlbie r6 ; Purge this entry
573 addi r6,r6,4096 ; Next page
574 bdnz cipurgeTLB ; Do them all...
575
576 mtcrf 0x80,r11 ; Set SMP capability
577 sync ; Make sure all TLB purges are done
578 eieio ; Order, order in the court
579
580 bf pfSMPcapb,cinoSMP ; SMP incapable...
581
582 tlbsync ; Sync all TLBs
583 sync
584 isync
585
586 cinoSMP: stw r2,0(r5) ; Unlock TLBIE lock
587
588 cror cr0_eq,pfL1ib,pfL1db ; Check for either I- or D-cache
589 bf- cr0_eq,cinoL1 ; No level 1 to flush...
590 rlwinm. r0,r9,0,ice,dce ; Were either of the level 1s on?
591 beq- cinoL1 ; No, no need to flush...
592
593 bf pfL1fab,ciswdl1 ; If no hw flush assist, go do by software...
594
595 mfspr r8,msscr0 ; Get the memory system control register
596 oris r8,r8,hi16(dl1hwfm) ; Turn on the hardware flush request
597
598 mtspr msscr0,r8 ; Start the flush operation
599
600 ciwdl1f: mfspr r8,msscr0 ; Get the control register again
601
602 rlwinm. r8,r8,0,dl1hwf,dl1hwf ; Has the flush request been reset yet?
603 bne ciwdl1f ; No, flush is still in progress...
604 b ciinvdl1 ; Go invalidate l1...
605
606 ;
607 ; We need to either make this very complicated or to use ROM for
608 ; the flush. The problem is that if during the following sequence a
609 ; snoop occurs that invalidates one of the lines in the cache, the
610 ; PLRU sequence will be altered making it possible to miss lines
611 ; during the flush. So, we either need to dedicate an area of RAM
612 ; to each processor, lock use of a RAM area, or use ROM. ROM is
613 ; by far the easiest. Note that this is not an issue for machines
614 ; that have hardware flush assists.
615 ;
616
617 ciswdl1: lwz r0,pfl1dSize(r12) ; Get the level 1 cache size
618
619 bf 31,cisnlck ; Skip if pfLClck not set...
620
621 mfspr r4,msscr0 ; ?
622 rlwinm r6,r4,0,0,l2pfes-1 ; ?
623 mtspr msscr0,r6 ; Set it
624 sync
625 isync
626
627 mfspr r8,ldstcr ; Save the LDSTCR
628 li r2,1 ; Get a mask of 0x01
629 lis r3,0xFFF0 ; Point to ROM
630 rlwinm r11,r0,29,3,31 ; Get the amount of memory to handle all indexes
631
632 li r6,0 ; Start here
633
634 cisiniflsh: dcbf r6,r3 ; Flush each line of the range we use
635 addi r6,r6,32 ; Bump to the next
636 cmplw r6,r0 ; Have we reached the end?
637 blt+ cisiniflsh ; Nope, continue initial flush...
638
639 sync ; Make sure it is done
640
641 addi r11,r11,-1 ; Get mask for index wrap
642 li r6,0 ; Get starting offset
643
644 cislckit: not r5,r2 ; Lock all but 1 way
645 rlwimi r5,r8,0,0,23 ; Build LDSTCR
646 mtspr ldstcr,r5 ; Lock a way
647 sync ; Clear out memory accesses
648 isync ; Wait for all
649
650
651 cistouch: lwzx r10,r3,r6 ; Pick up some trash
652 addi r6,r6,32 ; Go to the next index
653 and. r0,r6,r11 ; See if we are about to do next index
654 bne+ cistouch ; Nope, do more...
655
656 sync ; Make sure it is all done
657 isync
658
659 sub r6,r6,r11 ; Back up to start + 1
660 addi r6,r6,-1 ; Get it right
661
662 cisflush: dcbf r3,r6 ; Flush everything out
663 addi r6,r6,32 ; Go to the next index
664 and. r0,r6,r11 ; See if we are about to do next index
665 bne+ cisflush ; Nope, do more...
666
667 sync ; Make sure it is all done
668 isync
669
670
671 rlwinm. r2,r2,1,24,31 ; Shift to next way
672 bne+ cislckit ; Do this for all ways...
673
674 mtspr ldstcr,r8 ; Slam back to original
675 sync
676 isync
677
678 mtspr msscr0,r4 ; ?
679 sync
680 isync
681
682 b cinoL1 ; Go on to level 2...
683
684
685 cisnlck: rlwinm r2,r0,0,1,30 ; Double cache size
686 add r0,r0,r2 ; Get 3 times cache size
687 rlwinm r0,r0,26,6,31 ; Get 3/2 number of cache lines
688 lis r3,0xFFF0 ; Dead recon ROM address for now
689 mtctr r0 ; Number of lines to flush
690
691 ciswfldl1a: lwz r2,0(r3) ; Flush anything else
692 addi r3,r3,32 ; Next line
693 bdnz ciswfldl1a ; Flush the lot...
694
695 ciinvdl1: sync ; Make sure all flushes have been committed
696
697 mfspr r8,hid0 ; Get the HID0 bits
698 rlwinm r8,r8,0,dce+1,ice-1 ; Clear cache enables
699 mtspr hid0,r8 ; and turn off L1 cache
700 sync ; Make sure all is done
701 isync
702
703 ori r8,r8,lo16(icem|dcem|icfim|dcfim) ; Set the HID0 bits for enable, and invalidate
704 sync
705 isync
706
707 mtspr hid0,r8 ; Start the invalidate and turn on cache
708 rlwinm r8,r8,0,dcfi+1,icfi-1 ; Turn off the invalidate bits
709 mtspr hid0,r8 ; Turn off the invalidate (needed for some older machines)
710 sync
711
712
713 cinoL1:
714 ;
715 ; Flush and disable the level 2
716 ;
717 bf pfL2b,cinol2 ; No level 2 cache to flush
718
719 mfspr r8,l2cr ; Get the L2CR
720 lwz r3,pfl2cr(r12) ; Get the L2CR value
721 rlwinm. r0,r8,0,l2e,l2e ; Was the L2 enabled?
722 bne ciflushl2 ; Yes, force flush
723 cmplwi r8, 0 ; Was the L2 all the way off?
724 beq ciinvdl2 ; Yes, force invalidate
725 lis r0,hi16(l2sizm|l2clkm|l2ramm|l2ohm) ; Get configuration bits
726 xor r2,r8,r3 ; Get changing bits?
727 ori r0,r0,lo16(l2slm|l2dfm|l2bypm) ; More config bits
728 and. r0,r0,r2 ; Did any change?
729 bne- ciinvdl2 ; Yes, just invalidate and get PLL synced...
730
731 ciflushl2:
732 bf pfL2fab,ciswfl2 ; Flush not in hardware...
733
734 mr r10,r8 ; Take a copy now
735
736 bf 31,cinol2lck ; Skip if pfLClck not set...
737
738 oris r10,r10,hi16(l2ionlym|l2donlym) ; Set both instruction- and data-only
739 sync
740 mtspr l2cr,r10 ; Lock out the cache
741 sync
742 isync
743
744 cinol2lck: ori r10,r10,lo16(l2hwfm) ; Request flush
745 sync ; Make sure everything is done
746
747 mtspr l2cr,r10 ; Request flush
748
749 cihwfl2: mfspr r10,l2cr ; Get back the L2CR
750 rlwinm. r10,r10,0,l2hwf,l2hwf ; Is the flush over?
751 bne+ cihwfl2 ; Nope, keep going...
752 b ciinvdl2 ; Flush done, go invalidate L2...
753
754 ciswfl2:
755 lwz r0,pfl2Size(r12) ; Get the L2 size
756 oris r2,r8,hi16(l2dom) ; Set L2 to data only mode
757
758 b ciswfl2doa ; Branch to next line...
759
760 .align 5
761 ciswfl2doc:
762 mtspr l2cr,r2 ; Disable L2
763 sync
764 isync
765 b ciswfl2dod ; It is off, go invalidate it...
766
767 ciswfl2doa:
768 b ciswfl2dob ; Branch to next...
769
770 ciswfl2dob:
771 sync ; Finish memory stuff
772 isync ; Stop speculation
773 b ciswfl2doc ; Jump back up and turn on data only...
774 ciswfl2dod:
775 rlwinm r0,r0,27,5,31 ; Get the number of lines
776 lis r10,0xFFF0 ; Dead recon ROM for now
777 mtctr r0 ; Set the number of lines
778
779 ciswfldl2a: lwz r0,0(r10) ; Load something to flush something
780 addi r10,r10,32 ; Next line
781 bdnz ciswfldl2a ; Do the lot...
782
783 ciinvdl2: rlwinm r8,r3,0,l2e+1,31 ; Use the saved L2CR and clear the enable bit
784 b cinla ; Branch to next line...
785
786 .align 5
787 cinlc: mtspr l2cr,r8 ; Disable L2
788 sync
789 isync
790 b ciinvl2 ; It is off, go invalidate it...
791
792 cinla: b cinlb ; Branch to next...
793
794 cinlb: sync ; Finish memory stuff
795 isync ; Stop speculation
796 b cinlc ; Jump back up and turn off cache...
797
798 ciinvl2: sync
799 isync
800
801 cmplwi r3, 0 ; Should the L2 be all the way off?
802 beq cinol2 ; Yes, done with L2
803
804 oris r2,r8,hi16(l2im) ; Get the invalidate flag set
805
806 mtspr l2cr,r2 ; Start the invalidate
807 sync
808 isync
809 ciinvdl2a: mfspr r2,l2cr ; Get the L2CR
810 bf pfL2ib,ciinvdl2b ; Flush not in hardware...
811 rlwinm. r2,r2,0,l2i,l2i ; Is the invalidate still going?
812 bne+ ciinvdl2a ; Assume so, this will take a looong time...
813 sync
814 b cinol2 ; No level 2 cache to flush
815 ciinvdl2b:
816 rlwinm. r2,r2,0,l2ip,l2ip ; Is the invalidate still going?
817 bne+ ciinvdl2a ; Assume so, this will take a looong time...
818 sync
819 mtspr l2cr,r8 ; Turn off the invalidate request
820
821 cinol2:
822
823 ;
824 ; Flush and enable the level 3
825 ;
826 bf pfL3b,cinol3 ; No level 3 cache to flush
827
828 mfspr r8,l3cr ; Get the L3CR
829 lwz r3,pfl3cr(r12) ; Get the L3CR value
830 rlwinm. r0,r8,0,l3e,l3e ; Was the L3 enabled?
831 bne ciflushl3 ; Yes, force flush
832 cmplwi r8, 0 ; Was the L3 all the way off?
833 beq ciinvdl3 ; Yes, force invalidate
834 lis r0,hi16(l3pem|l3sizm|l3dxm|l3clkm|l3spom|l3ckspm) ; Get configuration bits
835 xor r2,r8,r3 ; Get changing bits?
836 ori r0,r0,lo16(l3pspm|l3repm|l3rtm|l3cyam|l3dmemm|l3dmsizm) ; More config bits
837 and. r0,r0,r2 ; Did any change?
838 bne- ciinvdl3 ; Yes, just invalidate and get PLL synced...
839
840 ciflushl3:
841 sync ; 7450 book says do this even though not needed
842 mr r10,r8 ; Take a copy now
843
844 bf 31,cinol3lck ; Skip if pfLClck not set...
845
846 oris r10,r10,hi16(l3iom) ; Set instruction-only
847 ori r10,r10,lo16(l3donlym) ; Set data-only
848 sync
849 mtspr l3cr,r10 ; Lock out the cache
850 sync
851 isync
852
853 cinol3lck: ori r10,r10,lo16(l3hwfm) ; Request flush
854 sync ; Make sure everything is done
855
856 mtspr l3cr,r10 ; Request flush
857
858 cihwfl3: mfspr r10,l3cr ; Get back the L3CR
859 rlwinm. r10,r10,0,l3hwf,l3hwf ; Is the flush over?
860 bne+ cihwfl3 ; Nope, keep going...
861
862 ciinvdl3: rlwinm r8,r3,0,l3e+1,31 ; Use saved L3CR value and clear the enable bit
863 sync ; Make sure of life, liberty, and justice
864 mtspr l3cr,r8 ; Disable L3
865 sync
866
867 cmplwi r3, 0 ; Should the L3 be all the way off?
868 beq cinol3 ; Yes, done with L3
869
870 ori r8,r8,lo16(l3im) ; Get the invalidate flag set
871
872 mtspr l3cr,r8 ; Start the invalidate
873
874 ciinvdl3b: mfspr r8,l3cr ; Get the L3CR
875 rlwinm. r8,r8,0,l3i,l3i ; Is the invalidate still going?
876 bne+ ciinvdl3b ; Assume so...
877 sync
878
879 lwz r10, pfBootConfig(r12) ; ?
880 rlwinm. r10, r10, 24, 28, 31 ; ?
881 beq ciinvdl3nopdet ; ?
882
883 mfspr r8,l3pdet ; ?
884 srw r2, r8, r10 ; ?
885 rlwimi r2, r8, 0, 24, 31 ; ?
886 subfic r10, r10, 32 ; ?
887 li r8, -1 ; ?
888 ori r2, r2, 0x0080 ; ?
889 slw r8, r8, r10 ; ?
890 or r8, r2, r8 ; ?
891 mtspr l3pdet, r8 ; ?
892 isync
893
894 ciinvdl3nopdet:
895 mfspr r8,l3cr ; Get the L3CR
896 rlwinm r8,r8,0,l3clken+1,l3clken-1 ; Clear the clock enable bit
897 mtspr l3cr,r8 ; Disable the clock
898
899 li r2,128 ; ?
900 ciinvdl3c: addi r2,r2,-1 ; ?
901 cmplwi r2,0 ; ?
902 bne+ ciinvdl3c
903
904 mfspr r10,msssr0 ; ?
905 rlwinm r10,r10,0,vgL3TAG+1,vgL3TAG-1 ; ?
906 mtspr msssr0,r10 ; ?
907 sync
908
909 mtspr l3cr,r3 ; Enable it as desired
910 sync
911 cinol3:
912 bf pfL2b,cinol2a ; No level 2 cache to enable
913
914 lwz r3,pfl2cr(r12) ; Get the L2CR value
915 cmplwi r3, 0 ; Should the L2 be all the way off?
916 beq cinol2a ; Yes, done with L2
917 mtspr l2cr,r3 ; Enable it as desired
918 sync
919
920 ;
921 ; Invalidate and turn on L1s
922 ;
923
924 cinol2a:
925 bt 31,cinoexit ; Skip if pfLClck set...
926
927 rlwinm r8,r9,0,dce+1,ice-1 ; Clear the I- and D- cache enables
928 mtspr hid0,r8 ; Turn off dem caches
929 sync
930
931 ori r8,r9,lo16(icem|dcem|icfim|dcfim) ; Set the HID0 bits for enable, and invalidate
932 rlwinm r9,r8,0,dcfi+1,icfi-1 ; Turn off the invalidate bits
933 sync
934 isync
935
936 mtspr hid0,r8 ; Start the invalidate and turn on L1 cache
937
938 cinoexit: mtspr hid0,r9 ; Turn off the invalidate (needed for some older machines) and restore entry conditions
939 sync
940 mtmsr r7 ; Restore MSR to entry
941 isync
942 blr ; Return...
943
944
945 /* Disables all caches
946 *
947 * void cacheDisable(void)
948 *
949 * Turns off all caches on the processor. They are not flushed.
950 *
951 */
952
953 ; Force a line boundary here
954 .align 5
955 .globl EXT(cacheDisable)
956
957 LEXT(cacheDisable)
958
959 mfsprg r11,2 ; Get CPU specific features
960 mtcrf 0x83,r11 ; Set feature flags
961
962 bf pfAltivecb,cdNoAlt ; No vectors...
963
964 dssall ; Stop streams
965
966 cdNoAlt: sync
967
968 mfspr r5,hid0 ; Get the hid
969 rlwinm r5,r5,0,dce+1,ice-1 ; Clear the I- and D- cache enables
970 mtspr hid0,r5 ; Turn off dem caches
971 sync
972
973 bf pfL2b,cdNoL2 ; Skip if no L2...
974
975 mfspr r5,l2cr ; Get the L2
976 rlwinm r5,r5,0,l2e+1,31 ; Turn off enable bit
977
978 b cinlaa ; Branch to next line...
979
980 .align 5
981 cinlcc: mtspr l2cr,r5 ; Disable L2
982 sync
983 isync
984 b cdNoL2 ; It is off, we are done...
985
986 cinlaa: b cinlbb ; Branch to next...
987
988 cinlbb: sync ; Finish memory stuff
989 isync ; Stop speculation
990 b cinlcc ; Jump back up and turn off cache...
991
992 cdNoL2:
993
994 bf pfL3b,cdNoL3 ; Skip down if no L3...
995
996 mfspr r5,l3cr ; Get the L3
997 rlwinm r5,r5,0,l3e+1,31 ; Turn off enable bit
998 rlwinm r5,r5,0,l3clken+1,l3clken-1 ; Turn off cache enable bit
999 mtspr l3cr,r5 ; Disable the caches
1000 sync
1001
1002 cdNoL3:
1003 blr ; Leave...
1004
1005
1006 /* Initialize processor thermal monitoring
1007 * void ml_thrm_init(void)
1008 *
1009 * Build initial TAU registers and start them all going.
1010 * We cannot do this at initial start up because we need to have the processor frequency first.
1011 * And just why is this in assembler when it does not have to be?? Cause I am just too
1012 * lazy to open up a "C" file, that's why.
1013 */
1014
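/* Worked sketch (illustrative only) of the THRM3 sample-interval arithmetic done
 * below, written as C for clarity; bus_rate stands in for the bus speed read
 * from gPEClockFrequencyInfo.
 *
 *	unsigned int cycles_per_us = bus_rate / 1000000 + 1;	// bus cycles per microsecond, rounded up
 *	unsigned int interval = 20 * cycles_per_us;		// about 20 microseconds of cycles
 *	if (interval > (thrmsitvm >> 1))			// saturate at the field maximum
 *		interval = thrmsitvm >> 1;
 */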
1015 ; Force a line boundary here
1016 .align 5
1017 .globl EXT(ml_thrm_init)
1018
1019 LEXT(ml_thrm_init)
1020
1021 mfsprg r12,0 ; Get the per_proc blok
1022 lis r11,hi16(EXT(gPEClockFrequencyInfo)) ; Get top of processor information
1023 mfsprg r10,2 ; Get CPU specific features
1024 ori r11,r11,lo16(EXT(gPEClockFrequencyInfo)) ; Get bottom of processor information
1025 mtcrf 0x40,r10 ; Get the installed features
1026
1027 li r3,lo16(thrmtidm|thrmvm) ; Set for lower-than thermal event at 0 degrees
1028 bflr pfThermalb ; No thermal monitoring on this cpu
1029 mtspr thrm1,r3 ; Do it
1030
1031 lwz r3,thrmthrottleTemp(r12) ; Get our throttle temperature
1032 rlwinm r3,r3,31-thrmthre,thrmthrs,thrmthre ; Position it
1033 ori r3,r3,lo16(thrmvm) ; Set for higher-than event
1034 mtspr thrm2,r3 ; Set it
1035
1036 lis r4,hi16(1000000) ; Top of million
1037 ;
1038 ; Note: some CPU manuals say this is processor clocks, some say bus rate. The latter
1039 ; makes more sense because otherwise we cannot get over about 400MHz.
1040 #if 0
1041 lwz r3,PECFIcpurate(r11) ; Get the processor speed
1042 #else
1043 lwz r3,PECFIbusrate(r11) ; Get the bus speed
1044 #endif
1045 ori r4,r4,lo16(1000000) ; Bottom of million
1046 lis r7,hi16(thrmsitvm>>1) ; Get top of highest possible value
1047 divwu r3,r3,r4 ; Get number of cycles per microseconds
1048 ori r7,r7,lo16(thrmsitvm>>1) ; Get the bottom of the highest possible value
1049 addi r3,r3,1 ; Ensure we have enough
1050 mulli r3,r3,20 ; Get 20 microseconds worth of cycles
1051 cmplw r3,r7 ; Check against max
1052 ble+ smallenuf ; It is ok...
1053 mr r3,r7 ; Saturate
1054
1055 smallenuf: rlwinm r3,r3,31-thrmsitve,thrmsitvs,thrmsitve ; Position
1056 ori r3,r3,lo16(thrmem) ; Enable with at least a 20 microsecond sample
1057 stw r3,thrm3val(r12) ; Save this in case we need it later
1058 mtspr thrm3,r3 ; Do it
1059 blr
1060
1061
1062 /* Set thermal monitor bounds
1063 * void ml_thrm_set(unsigned int low, unsigned int high)
1064 *
1065 * Set TAU to interrupt below low and above high. A value of
1066 * zero disables interruptions in that direction.
1067 */
1068
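/* Usage sketch (illustrative only): the temperatures are made-up example values
 * in degrees.
 *
 *	ml_thrm_set(10, 70);	// interrupt below 10 degrees or above 70 degrees
 *	ml_thrm_set(0, 70);	// a zero bound disables that direction
 */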
1069 ; Force a line boundary here
1070 .align 5
1071 .globl EXT(ml_thrm_set)
1072
1073 LEXT(ml_thrm_set)
1074
1075 mfmsr r0 ; Get the MSR
1076 rlwinm r6,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear EE bit
1077 mtmsr r6
1078
1079 mfsprg r12,0 ; Get the per_proc blok
1080
1081 rlwinm. r6,r3,31-thrmthre,thrmthrs,thrmthre ; Position it and see if enabled
1082 mfsprg r9,2 ; Get CPU specific features
1083 stw r3,thrmlowTemp(r12) ; Set the low temperature
1084 mtcrf 0x40,r9 ; See if we can thermal this machine
1085 rlwinm r9,r9,(((31-thrmtie)+(pfThermIntb+1))&31),thrmtie,thrmtie ; Set interrupt enable if this machine can handle it
1086 bf pfThermalb,tsetcant ; No can do...
1087 beq tsetlowo ; We are setting the low off...
1088 ori r6,r6,lo16(thrmtidm|thrmvm) ; Set the lower-than and valid bit
1089 or r6,r6,r9 ; Set interruption request if supported
1090
1091 tsetlowo: mtspr thrm1,r6 ; Cram the register
1092
1093 rlwinm. r6,r4,31-thrmthre,thrmthrs,thrmthre ; Position it and see if enabled
1094 stw r4,thrmhighTemp(r12) ; Set the high temperature
1095 beq tsethigho ; We are setting the high off...
1096 ori r6,r6,lo16(thrmvm) ; Set valid bit
1097 or r6,r6,r9 ; Set interruption request if supported
1098
1099 tsethigho: mtspr thrm2,r6 ; Cram the register
1100
1101 tsetcant: mtmsr r0 ; Reenable interruptions
1102 blr ; Leave...
1103
1104 /* Read processor temperature
1105 * unsigned int ml_read_temp(void)
1106 *
1107 */
1108
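/* Usage sketch (illustrative only): the all-ones value is what the
 * no-thermal-assist path below returns.
 *
 *	unsigned int temp = ml_read_temp();
 *	if (temp == 0xFFFFFFFF) {
 *		// no TAU on this processor; the value is bogus
 *	}
 */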
1109 ; Force a line boundary here
1110 .align 5
1111 .globl EXT(ml_read_temp)
1112
1113 LEXT(ml_read_temp)
1114
1115 mfmsr r9 ; Save the MSR
1116 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions
1117 li r5,15 ; Starting point for ranging (start at 15 so we do not overflow)
1118 mfsprg r7,2 ; Get CPU specific features
1119 mtmsr r8 ; Do not allow interruptions
1120 mtcrf 0x40,r7 ; See if we can thermal this machine
1121 bf pfThermalb,thrmcant ; No can do...
1122
1123 mfspr r11,thrm1 ; Save thrm1
1124
1125 thrmrange: rlwinm r4,r5,31-thrmthre,thrmthrs,thrmthre ; Position it
1126 ori r4,r4,lo16(thrmtidm|thrmvm) ; Flip on the valid bit and make comparison for less than
1127
1128 mtspr thrm1,r4 ; Set the test value
1129
1130 thrmreada: mfspr r3,thrm1 ; Get the thermal register back
1131 rlwinm. r0,r3,0,thrmtiv,thrmtiv ; Has it settled yet?
1132 beq+ thrmreada ; Nope...
1133
1134 rlwinm. r0,r3,0,thrmtin,thrmtin ; Are we still under the threshold?
1135 bne thrmsearch ; No, we went over...
1136
1137 addi r5,r5,16 ; Start by trying every 16 degrees
1138 cmplwi r5,127 ; Have we hit the max?
1139 blt- thrmrange ; Got some more to do...
1140
1141 thrmsearch: rlwinm r4,r5,31-thrmthre,thrmthrs,thrmthre ; Position it
1142 ori r4,r4,lo16(thrmtidm|thrmvm) ; Flip on the valid bit and make comparison for less than
1143
1144 mtspr thrm1,r4 ; Set the test value
1145
1146 thrmread: mfspr r3,thrm1 ; Get the thermal register back
1147 rlwinm. r0,r3,0,thrmtiv,thrmtiv ; Has it settled yet?
1148 beq+ thrmread ; Nope...
1149
1150 rlwinm. r0,r3,0,thrmtin,thrmtin ; Are we still under the threshold?
1151 beq thrmdone ; No, we hit it...
1152 addic. r5,r5,-1 ; Go down a degree
1153 bge+ thrmsearch ; Try again (until we are below freezing)...
1154
1155 thrmdone: addi r3,r5,1 ; Return the temperature (bump it up to make it correct)
1156 mtspr thrm1,r11 ; Restore the thermal register
1157 mtmsr r9 ; Re-enable interruptions
1158 blr ; Leave...
1159
1160 thrmcant: eqv r3,r3,r3 ; Return bogus temperature because we cannot read it
1161 mtmsr r9 ; Re-enable interruptions
1162 blr ; Leave...
1163
1164 /* Throttle processor speed up or down
1165 * unsigned int ml_throttle(unsigned int step)
1166 *
1167 * Returns old speed and sets new. Both step and return are values from 0 to
1168 * 255 that define the number of throttle steps, 0 being off and ictcfim>>1 being the maximum.
1169 *
1170 */
1171
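/* Usage sketch (illustrative only): the step value is a made-up example.
 *
 *	unsigned int old = ml_throttle(64);	// throttle down, remember old setting
 *	// ... later ...
 *	(void) ml_throttle(old);		// restore the previous setting
 *	ml_throttle(0);				// or turn throttling off entirely
 */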
1172 ; Force a line boundary here
1173 .align 5
1174 .globl EXT(ml_throttle)
1175
1176 LEXT(ml_throttle)
1177
1178 mfmsr r9 ; Save the MSR
1179 rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions
1180 cmplwi r3,lo16(ictcfim>>1) ; See if we are going too far
1181 mtmsr r8 ; Do not allow interruptions
1182 ble+ throtok ; Throttle value is ok...
1183 li r3,lo16(ictcfim>>1) ; Set max
1184
1185 throtok: rlwinm. r4,r3,1,ictcfib,ictcfie ; Set the throttle
1186 beq throtoff ; Skip if we are turning it off...
1187 ori r4,r4,lo16(thrmvm) ; Turn on the valid bit
1188
1189 throtoff: mfspr r3,ictc ; Get the old throttle
1190 mtspr ictc,r4 ; Set the new
1191 rlwinm r3,r3,31,1,31 ; Shift throttle value over
1192 mtmsr r9 ; Restore interruptions
1193 blr ; Return...
1194
1195 /*
1196 ** ml_get_timebase()
1197 **
1198 ** Entry - R3 contains pointer to 64 bit structure.
1199 **
1200 ** Exit - 64 bit structure filled in.
1201 **
1202 */
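/* Sketch (illustrative only): the same read-until-stable idea in C, filling a
 * caller-supplied 64-bit structure; read_tbu()/read_tbl() stand in for the
 * mftbu/mftb instructions and the structure layout is assumed, high word first.
 *
 *	struct timebase { unsigned int hi, lo; };
 *
 *	void get_timebase(struct timebase *tb)
 *	{
 *		unsigned int hi, lo, hi2;
 *		do {
 *			hi  = read_tbu();
 *			lo  = read_tbl();
 *			hi2 = read_tbu();
 *		} while (hi != hi2);	// retry if the upper word ticked over
 *		tb->hi = hi;
 *		tb->lo = lo;
 *	}
 */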
1203 ; Force a line boundary here
1204 .align 5
1205 .globl EXT(ml_get_timebase)
1206
1207 LEXT(ml_get_timebase)
1208
1209 loop:
1210 mftbu r4
1211 mftb r5
1212 mftbu r6
1213 cmpw r6, r4
1214 bne- loop
1215
1216 stw r4, 0(r3)
1217 stw r5, 4(r3)
1218
1219 blr
1220
1221 /*
1222 ** ml_sense_nmi()
1223 **
1224 */
1225 ; Force a line boundary here
1226 .align 5
1227 .globl EXT(ml_sense_nmi)
1228
1229 LEXT(ml_sense_nmi)
1230
1231 blr ; Leave...
1232
1233 /*
1234 ** ml_set_processor_speed()
1235 **
1236 */
1237 ; Force a line boundary here
1238 .align 5
1239 .globl EXT(ml_set_processor_speed)
1240
1241 LEXT(ml_set_processor_speed)
1242 blr