1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25
26 #include <assym.s>
27 #include <debug.h>
28 #include <cpus.h>
29 #include <db_machine_commands.h>
30
31 #include <mach_debug.h>
32 #include <ppc/asm.h>
33 #include <ppc/proc_reg.h>
34 #include <ppc/exception.h>
35 #include <ppc/Performance.h>
36 #include <ppc/savearea.h>
37 #include <mach/ppc/vm_param.h>
38
39 #define ESPDEBUG 0
40 #define INSTRUMENT 0
41
42 #define featAltivec 29
43 #define wasNapping 30
44
45 #define VECTOR_SEGMENT .section __VECTORS, __interrupts
46
47 VECTOR_SEGMENT
48
49
50 .globl EXT(ExceptionVectorsStart)
51
52 EXT(ExceptionVectorsStart): /* Used if relocating the exception vectors */
53 baseR: /* Used so we have more readable code */
54
55 ;
56 ; Handle system reset.
57 ; We do not ever expect a hard reset so we do not actually check.
58 ; When we come here, we check for a RESET_HANDLER_START (which means we are
59 ; waking up from sleep), a RESET_HANDLER_BUPOR (which is used for bring-up
60 ; when starting directly from a POR), and RESET_HANDLER_IGNORE (which means
61 ; ignore the interrupt).
62 ;
63 ; Some machines (so far, 32-bit guys) will always ignore a non-START interrupt.
64 ; The ones that do take it check whether the interrupt is to be ignored. This is
65 ; always the case until the previous reset is handled (i.e., we have exited
66 ; from the debugger).
67 ;
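; The three words at ResetHandler (0xF0) below hold the handler type, the call address,
; and its argument (RESETHANDLER_TYPE/CALL/ARG); the 0x100 vector reads them to decide
; whether to wake from sleep, branch to the POR bring-up code, or ignore the reset.
;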
68 . = 0xf0
69 .globl EXT(ResetHandler)
70 EXT(ResetHandler):
71 .long 0x0
72 .long 0x0
73 .long 0x0
74
75 . = 0x100
76 .L_handler100:
77 mtsprg 2,r13 /* Save R13 */
78 mtsprg 3,r11 /* Save R11 */
79 lwz r13,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_TYPE)(br0) ; Get reset type
80 mfcr r11
81 cmpi cr0,r13,RESET_HANDLER_START
82 bne resetexc
83
84 li r11,RESET_HANDLER_NULL
85 stw r11,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_TYPE)(br0) ; Clear reset type
86
87 lwz r4,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_CALL)(br0)
88 lwz r3,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_ARG)(br0)
89 mtlr r4
90 blr
91
92 resetexc: cmplwi r13,RESET_HANDLER_BUPOR ; Special bring up POR sequence?
93 bne resetexc2 ; No...
94 lis r4,hi16(EXT(resetPOR)) ; Get POR code
95 ori r4,r4,lo16(EXT(resetPOR)) ; The rest
96 mtlr r4 ; Set it
97 blr ; Jump to it....
98
99 resetexc2: cmplwi cr1,r13,RESET_HANDLER_IGNORE ; Are we ignoring these? (Software debounce)
100
101 mfsprg r13,0 ; Get per_proc
102 lwz r13,pfAvailable(r13) ; Get the features
103 rlwinm. r13,r13,0,pf64Bitb,pf64Bitb ; Is this a 64-bit machine?
104 cror cr1_eq,cr0_eq,cr1_eq ; See if we want to take this
105 bne-- cr1,rxCont ; Yes, continue...
106 bne-- rxIg64 ; 64-bit path...
107
108 mtcr r11 ; Restore the CR
109 mfsprg r13,2 ; Restore R13
110 mfsprg r11,0 ; Get per_proc
111 lwz r11,pfAvailable(r11) ; Get the features
112 mtsprg 2,r11 ; Restore sprg2
113 mfsprg r11,3 ; Restore R11
114 rfi ; Return and ignore the reset
115
116 rxIg64: mtcr r11 ; Restore the CR
117 mfsprg r11,0 ; Get per_proc
118 mtspr hsprg0,r14 ; Save a register
119 lwz r14,UAW(r11) ; Get the User Assist Word
120 mfsprg r13,2 ; Restore R13
121 lwz r11,pfAvailable(r11) ; Get the features
122 mtsprg 2,r11 ; Restore sprg2
123 mfsprg r11,3 ; Restore R11
124 mtsprg 3,r14 ; Set the UAW in sprg3
125 mfspr r14,hsprg0 ; Restore R14
126 rfid ; Return and ignore the reset
127
128 rxCont: mtcr r11
129 li r11,RESET_HANDLER_IGNORE ; Get set to ignore
130 stw r11,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_TYPE)(br0) ; Start ignoring these
131 mfsprg r13,1 /* Get the exception save area */
132 li r11,T_RESET /* Set 'rupt code */
133 b .L_exception_entry /* Join common... */
134
135 /*
136 * Machine check
137 */
138
139 . = 0x200
140 .L_handler200:
141 mtsprg 2,r13 ; Save R13
142 mtsprg 3,r11 ; Save R11
143
144 .globl EXT(extPatchMCK)
145 LEXT(extPatchMCK) ; This is patched to a nop for 64-bit
146 b h200aaa ; Skip 64-bit code...
147
148 ;
149 ; Fall through here for 970 MCKs.
150 ;
151
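;
; Note: the sequence below (with its original '?' comments) appears to be 970-specific
; machine-check scrubbing: it flips bits in HID4 around sync/isync barriers and then
; invalidates an SLB entry for an implausible ESID before joining the common path.
; The exact HID4 bit meanings are processor-specific and are not documented here.
;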
152 li r11,1 ; ?
153 sldi r11,r11,32+3 ; ?
154 mfspr r13,hid4 ; ?
155 or r11,r11,r13 ; ?
156 sync
157 mtspr hid4,r11 ; ?
158 isync
159 li r11,1 ; ?
160 sldi r11,r11,32+8 ; ?
161 andc r13,r13,r11 ; ?
162 lis r11,0xE000 ; Get the unlikeliest ESID possible
163 sync
164 mtspr hid4,r13 ; ?
165 isync ; ?
166
167 srdi r11,r11,1 ; ?
168 slbie r11 ; ?
169 sync
170 isync
171
172 li r11,T_MACHINE_CHECK ; Set rupt code
173 b .L_exception_entry ; Join common...
174
175 ;
176 ; Preliminary checking of other MCKs
177 ;
178
179 h200aaa: mfsrr1 r11 ; Get the SRR1
180 mfcr r13 ; Save the CR
181
182 rlwinm. r11,r11,0,dcmck,dcmck ; Was this caused by the L1 data cache?
183 beq+ notDCache ; No, not a D-cache machine check...
184
185 sync
186 mfspr r11,msscr0 ; Get the current MSSCR0
187 dssall ; Stop any active data streams
188 sync
189 isync
190
191 oris r11,r11,hi16(dl1hwfm) ; Request a hardware flush of the L1 D-cache
192 mtspr msscr0,r11 ; Start the flush
193
194 rstbsy: mfspr r11,msscr0 ; Get MSSCR0 again
195
196 rlwinm. r11,r11,0,dl1hwf,dl1hwf ; Is the hardware flush still busy?
197 bne rstbsy ; Yes, keep waiting...
198
199 sync ; Make sure everything is done
200
201 mfsprg r11,0 ; Get the per_proc
202 mtcrf 255,r13 ; Restore CRs
203 lwz r13,hwMachineChecks(r11) ; Get old count
204 addi r13,r13,1 ; Count this one
205 stw r13,hwMachineChecks(r11) ; Set new count
206 lwz r11,pfAvailable(r11) ; Get the feature flags
207 mfsprg r13,2 ; Restore R13
208 mtsprg 2,r11 ; Set the feature flags
209 mfsprg r11,3 ; Restore R11
210 rfi ; Return
211
212 notDCache: mtcrf 255,r13 ; Restore CRs
213 li r11,T_MACHINE_CHECK ; Set rupt code
214 b .L_exception_entry ; Join common...
215
216
217 /*
218 * Data access - page fault, invalid memory rights for operation
219 */
220
221 . = 0x300
222 .L_handler300:
223 mtsprg 2,r13 /* Save R13 */
224 mtsprg 3,r11 /* Save R11 */
225 li r11,T_DATA_ACCESS /* Set 'rupt code */
226 b .L_exception_entry /* Join common... */
227
228
229 /*
230 * Data segment
231 */
232
233 . = 0x380
234 .L_handler380:
235 mtsprg 2,r13 ; Save R13
236 mtsprg 3,r11 ; Save R11
237 li r11,T_DATA_SEGMENT ; Set rupt code
238 b .L_exception_entry ; Join common...
239
240 /*
241 * Instruction access - as for data access
242 */
243
244 . = 0x400
245 .L_handler400:
246 mtsprg 2,r13 ; Save R13
247 mtsprg 3,r11 ; Save R11
248 li r11,T_INSTRUCTION_ACCESS ; Set rupt code
249 b .L_exception_entry ; Join common...
250
251 /*
252 * Instruction segment
253 */
254
255 . = 0x480
256 .L_handler480:
257 mtsprg 2,r13 ; Save R13
258 mtsprg 3,r11 ; Save R11
259 li r11,T_INSTRUCTION_SEGMENT ; Set rupt code
260 b .L_exception_entry ; Join common...
261
262 /*
263 * External interrupt
264 */
265
266 . = 0x500
267 .L_handler500:
268 mtsprg 2,r13 ; Save R13
269 mtsprg 3,r11 ; Save R11
270 li r11,T_INTERRUPT ; Set rupt code
271 b .L_exception_entry ; Join common...
272
273 /*
274 * Alignment - many reasons
275 */
276
277 . = 0x600
278 .L_handler600:
279 mtsprg 2,r13 /* Save R13 */
280 mtsprg 3,r11 /* Save R11 */
281 li r11,T_ALIGNMENT|T_FAM /* Set 'rupt code */
282 b .L_exception_entry /* Join common... */
283
284 /*
285 * Program - floating point exception, illegal inst, priv inst, user trap
286 */
287
288 . = 0x700
289 .L_handler700:
290 mtsprg 2,r13 /* Save R13 */
291 mtsprg 3,r11 /* Save R11 */
292
293 #if 0
294 mfsrr1 r13 ; (BRINGUP)
295 mfcr r11 ; (BRINGUP)
296 rlwinm. r13,r13,0,12,12 ; (BRINGUP)
297 crmove cr1_eq,cr0_eq ; (BRINGUP)
298 mfsrr1 r13 ; (BRINGUP)
299 rlwinm. r13,r13,0,MSR_PR_BIT,MSR_PR_BIT ; (BRINGUP)
300 crorc cr0_eq,cr1_eq,cr0_eq ; (BRINGUP)
301 bf-- cr0_eq,. ; (BRINGUP)
302 mtcrf 255,r11 ; (BRINGUP)
303 #endif
304
305 li r11,T_PROGRAM|T_FAM /* Set 'rupt code */
306 b .L_exception_entry /* Join common... */
307
308 /*
309 * Floating point disabled
310 */
311
312 . = 0x800
313 .L_handler800:
314 mtsprg 2,r13 /* Save R13 */
315 mtsprg 3,r11 /* Save R11 */
316 li r11,T_FP_UNAVAILABLE /* Set 'rupt code */
317 b .L_exception_entry /* Join common... */
318
319
320 /*
321 * Decrementer - DEC register has passed zero.
322 */
323
324 . = 0x900
325 .L_handler900:
326 mtsprg 2,r13 /* Save R13 */
327 mtsprg 3,r11 /* Save R11 */
328 li r11,T_DECREMENTER /* Set 'rupt code */
329 b .L_exception_entry /* Join common... */
330
331 /*
332 * I/O controller interface error - MACH does not use this
333 */
334
335 . = 0xA00
336 .L_handlerA00:
337 mtsprg 2,r13 /* Save R13 */
338 mtsprg 3,r11 /* Save R11 */
339 li r11,T_IO_ERROR /* Set 'rupt code */
340 b .L_exception_entry /* Join common... */
341
342 /*
343 * Reserved
344 */
345
346 . = 0xB00
347 .L_handlerB00:
348 mtsprg 2,r13 /* Save R13 */
349 mtsprg 3,r11 /* Save R11 */
350 li r11,T_RESERVED /* Set 'rupt code */
351 b .L_exception_entry /* Join common... */
352
353 ;
354 ; System call - generated by the sc instruction
355 ;
356 ; We handle the ultra-fast traps right here. They are:
357 ;
358 ; 0xFFFFFFFF - BlueBox only - MKIsPreemptiveTask
359 ; 0xFFFFFFFE - BlueBox only - kcNKIsPreemptiveTaskEnv
360 ; 0x00007FF2 - User state only - thread info
361 ; 0x00007FF3 - User state only - floating point / vector facility status
362 ; 0x00007FF4 - Kernel only - loadMSR - not used on 64-bit machines
363 ;
364 ; Note: none of these are handled if a virtual machine is running
365 ; Also, we treat SCs as kernel SCs if the RI bit is set
366 ;
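; The selector arrives in R0: the negative values are the BlueBox traps, the 0x7FFx
; values are the user/kernel fast paths, and everything else falls through to the
; normal system call handler.
;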
367
368 . = 0xC00
369 .L_handlerC00:
370 mtsprg 3,r11 ; Save R11
371 mfsprg r11,2 ; Get the feature flags
372
373 mtsprg 2,r13 ; Save R13
374 rlwinm r11,r11,pf64Bitb-4,4,4 ; Get the 64-bit flag
375 mfsrr1 r13 ; Get SRR1 for loadMSR
376 rlwimi r11,r13,MSR_PR_BIT-5,5,5 ; Move the PR bit to bit 1
377 mfcr r13 ; Save the CR
378
379 mtcrf 0x40,r11 ; Get the top 3 CR bits to 64-bit, PR, sign
380
381 cmpwi r0,lo16(-3) ; Eliminate all negatives but -1 and -2
382 mfsprg r11,0 ; Get the per_proc
383 bf-- 5,uftInKern ; We came from the kernel...
384 ble-- notufp ; This is a mach call
385
386 lwz r11,spcFlags(r11) ; Pick up the special flags
387
388 cmpwi cr7,r0,lo16(-1) ; Is this a BlueBox call?
389 cmplwi cr2,r0,0x7FF2 ; Ultra fast path cthread info call?
390 cmplwi cr3,r0,0x7FF3 ; Ultra fast path facility status?
391 cror cr4_eq,cr2_eq,cr3_eq ; Is this one of the two ufts we handle here?
392
393 ble-- cr7,uftBBCall ; We think this is blue box call...
394
395 rlwinm r11,r11,16,16,31 ; Extract spcFlags upper bits
396 andi. r11,r11,hi16(runningVM|FamVMena|FamVMmode)
397 cmpwi cr0,r11,hi16(runningVM|FamVMena|FamVMmode) ; Test in VM FAM
398 beq-- cr0,ufpVM ; fast paths running VM ...
399
400 bne-- cr4_eq,notufp ; Bail if this is not a uft...
401
402 ;
403 ; Handle normal user ultra-fast trap
404 ;
405
406 li r3,spcFlags ; Assume facility status - 0x7FF3
407
408 beq-- cr3,uftFacStat ; This is a facilities status call...
409
410 li r3,UAW ; This is really a thread info call - 0x7FF2
411
412 uftFacStat: mfsprg r11,0 ; Get the per_proc
413 lwzx r3,r11,r3 ; Get the UAW or spcFlags field
414
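;
; uftExit: common ultra-fast-trap exit. sprg2 held the feature flags at interrupt time
; and was overwritten with R13 on entry, so pfAvailable is reloaded into sprg2 here; on
; 64-bit machines sprg3 is also reloaded with the UAW before the rfid.
;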
415 uftExit: bt++ 4,uftX64 ; Go do the 64-bit exit...
416
417 lwz r11,pfAvailable(r11) ; Get the feature flags
418 mtcrf 255,r13 ; Restore the CRs
419 mfsprg r13,2 ; Restore R13
420 mtsprg 2,r11 ; Set the feature flags
421 mfsprg r11,3 ; Restore R11
422
423 rfi ; Back to our guy...
424
425 uftX64: mtspr hsprg0,r14 ; Save a register
426
427 lwz r14,UAW(r11) ; Get the User Assist Word
428 lwz r11,pfAvailable(r11) ; Get the feature flags
429
430 mtcrf 255,r13 ; Restore the CRs
431
432 mfsprg r13,2 ; Restore R13
433 mtsprg 2,r11 ; Set the feature flags
434 mfsprg r11,3 ; Restore R11
435 mtsprg 3,r14 ; Set the UAW in sprg3
436 mfspr r14,hsprg0 ; Restore R14
437
438 rfid ; Back to our guy...
439
440 ;
441 ; Handle BlueBox ultra-fast trap
442 ;
443
444 uftBBCall: andi. r11,r11,bbNoMachSC|bbPreemptive ; Clear what we do not need
445 cmplwi r11,bbNoMachSC ; See if we are trapping syscalls
446 blt-- notufp ; No...
447
448 rlwimi r13,r11,bbPreemptivebit-cr0_eq,cr0_eq,cr0_eq ; Copy preemptive task flag into user cr0_eq
449
450 mfsprg r11,0 ; Get the per proc
451
452 beq++ cr7,uftExit ; For MKIsPreemptiveTask we are done...
453
454 lwz r0,ppbbTaskEnv(r11) ; Get the shadowed taskEnv from per_proc_area
455 b uftExit ; We are really all done now...
456
457 ; Kernel ultra-fast trap
458
459 uftInKern: cmplwi r0,0x7FF4 ; Ultra fast path loadMSR?
460 bne- notufp ; Someone is trying to cheat...
461
462 mtsrr1 r3 ; Set new MSR
463
464 b uftExit ; Go load the new MSR...
465
466 notufp: mtcrf 0xFF,r13 ; Restore the used CRs
467 li r11,T_SYSTEM_CALL|T_FAM ; Set interrupt code
468 b .L_exception_entry ; Join common...
469
470
471
472
473
474 /*
475 * Trace - generated by single stepping
476 * performance monitor BE branch enable tracing/logging
477 * is also done here now. While this is permanently in the
478 * system, the impact is completely unnoticeable, as this code is
479 * only executed when (a) a single step or branch exception is
480 * hit, (b) in the single step debugger case there is so much
481 * overhead already that the few extra instructions for testing for BE
482 * are not even noticeable, and (c) the BE logging code is *only* run
483 * when it is enabled by the tool which will not happen during
484 * normal system usage
485 *
486 * Note that this trace is available only to user state so we do not
487 * need to set sprg2 before returning.
488 */
489
490 . = 0xD00
491 .L_handlerD00:
492 mtsprg 3,r11 ; Save R11
493 mfsprg r11,2 ; Get the feature flags
494 mtsprg 2,r13 ; Save R13
495 rlwinm r11,r11,pf64Bitb-4,4,4 ; Get the 64-bit flag
496 mfcr r13 ; Get the CR
497 mtcrf 0x40,r11 ; Set the CR
498 mfsrr1 r11 ; Get the old MSR
499 rlwinm. r11,r11,0,MSR_PR_BIT,MSR_PR_BIT ; Are we in supervisor state?
500
501 mfsprg r11,0 ; Get the per_proc
502 lhz r11,PP_CPU_FLAGS(r11) ; Get the flags
503 crmove cr1_eq,cr0_eq ; Remember if we are in supervisor state
504 rlwinm. r11,r11,0,traceBEb+16,traceBEb+16 ; Special trace enabled?
505 cror cr0_eq,cr0_eq,cr1_eq ; Is trace off or supervisor state?
506 bf-- cr0_eq,specbrtr ; No, we need to trace...
507
508 notspectr: mtcr r13 ; Restore CR
509 li r11,T_TRACE|T_FAM ; Set interrupt code
510 b .L_exception_entry ; Join common...
511
512 .align 5
513
514 ;
515 ; We are doing the special branch trace
516 ;
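; The PC is logged into pc_trace_buf, a one-page circular buffer: the slot index kept
; in spcTRp wraps at one page, and when the store lands in the last slot (offset 4092)
; we go take the normal trace exception instead of resuming.
;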
517
518 specbrtr: mfsprg r11,0 ; Get the per_proc area
519 bt++ 4,sbxx64a ; Jump if 64-bit...
520
521 stw r1,tempr0+4(r11) ; Save in a scratch area
522 stw r2,tempr1+4(r11) ; Save in a scratch area
523 stw r3,tempr2+4(r11) ; Save in a scratch area
524 b sbxx64b ; Skip...
525
526 sbxx64a: std r1,tempr0(r11) ; Save in a scratch area
527 std r2,tempr1(r11) ; Save in a scratch area
528 std r3,tempr2(r11) ; Save in a scratch area
529
530 sbxx64b: lis r2,hi16(EXT(pc_trace_buf)) ; Get the top of the buffer
531 lwz r3,spcTRp(r11) ; Pick up buffer position
532 ori r2,r2,lo16(EXT(pc_trace_buf)) ; Get the bottom of the buffer
533 cmplwi cr2,r3,4092 ; Set cr2_eq if we should take the exception
534 mfsrr0 r1 ; Get the pc
535 stwx r1,r2,r3 ; Save it in the buffer
536 addi r3,r3,4 ; Point to the next slot
537 rlwinm r3,r3,0,20,31 ; Wrap the slot at one page
538 stw r3,spcTRp(r11) ; Save the new slot
539
540 bt++ 4,sbxx64c ; Jump if 64-bit...
541
542 lwz r1,tempr0+4(r11) ; Restore work register
543 lwz r2,tempr1+4(r11) ; Restore work register
544 lwz r3,tempr2+4(r11) ; Restore work register
545 beq cr2,notspectr ; Buffer filled, make a rupt...
546 b uftExit ; Go restore and leave...
547
548 sbxx64c: ld r1,tempr0(r11) ; Restore work register
549 ld r2,tempr1(r11) ; Restore work register
550 ld r3,tempr2(r11) ; Restore work register
551 beq cr2,notspectr ; Buffer filled, make a rupt...
552 b uftExit ; Go restore and leave...
553
554 /*
555 * Floating point assist
556 */
557
558 . = 0xE00
559 .L_handlerE00:
560 mtsprg 2,r13 /* Save R13 */
561 mtsprg 3,r11 /* Save R11 */
562 li r11,T_FP_ASSIST /* Set 'rupt code */
563 b .L_exception_entry /* Join common... */
564
565
566 /*
567 * Performance monitor interruption
568 */
569
570 . = 0xF00
571 PMIhandler:
572 mtsprg 2,r13 /* Save R13 */
573 mtsprg 3,r11 /* Save R11 */
574 li r11,T_PERF_MON /* Set 'rupt code */
575 b .L_exception_entry /* Join common... */
576
577
578 /*
579 * VMX exception
580 */
581
582 . = 0xF20
583 VMXhandler:
584 mtsprg 2,r13 /* Save R13 */
585 mtsprg 3,r11 /* Save R11 */
586 li r11,T_VMX /* Set 'rupt code */
587 b .L_exception_entry /* Join common... */
588
589
590
591 ;
592 ; Instruction translation miss exception - not supported
593 ;
594
595 . = 0x1000
596 .L_handler1000:
597 mtsprg 2,r13 ; Save R13
598 mtsprg 3,r11 ; Save R11
599 li r11,T_INVALID_EXCP0 ; Set rupt code
600 b .L_exception_entry ; Join common...
601
602
603
604 ;
605 ; Data load translation miss exception - not supported
606 ;
607
608 . = 0x1100
609 .L_handler1100:
610 mtsprg 2,r13 ; Save R13
611 mtsprg 3,r11 ; Save R11
612 li r11,T_INVALID_EXCP1 ; Set rupt code
613 b .L_exception_entry ; Join common...
614
615
616
617 ;
618 ; Data store translation miss exception - not supported
619 ;
620
621 . = 0x1200
622 .L_handler1200:
623 mtsprg 2,r13 ; Save R13
624 mtsprg 3,r11 ; Save R11
625 li r11,T_INVALID_EXCP2 ; Set rupt code
626 b .L_exception_entry ; Join common...
627
628
629 /*
630 * Instruction address breakpoint
631 */
632
633 . = 0x1300
634 .L_handler1300:
635 mtsprg 2,r13 /* Save R13 */
636 mtsprg 3,r11 /* Save R11 */
637 li r11,T_INSTRUCTION_BKPT /* Set 'rupt code */
638 b .L_exception_entry /* Join common... */
639
640 /*
641 * System management interrupt
642 */
643
644 . = 0x1400
645 .L_handler1400:
646 mtsprg 2,r13 /* Save R13 */
647 mtsprg 3,r11 /* Save R11 */
648 li r11,T_SYSTEM_MANAGEMENT /* Set 'rupt code */
649 b .L_exception_entry /* Join common... */
650
651
652 /*
653 * Soft Patch
654 */
655
656 . = 0x1500
657 .L_handler1500:
658 mtsprg 2,r13 /* Save R13 */
659 mtsprg 3,r11 /* Save R11 */
660 li r11,T_SOFT_PATCH /* Set 'rupt code */
661 b .L_exception_entry /* Join common... */
662
663 ;
664 ; Altivec Java Mode Assist interrupt or Maintenance interrupt
665 ;
666
667 . = 0x1600
668 .L_handler1600:
669 mtsprg 2,r13 /* Save R13 */
670 mtsprg 3,r11 /* Save R11 */
671 li r11,T_ALTIVEC_ASSIST /* Set 'rupt code */
672 b .L_exception_entry /* Join common... */
673
674 ;
675 ; Altivec Java Mode Assist interrupt or Thermal interruption
676 ;
677
678 . = 0x1700
679 .L_handler1700:
680 mtsprg 2,r13 /* Save R13 */
681 mtsprg 3,r11 /* Save R11 */
682 li r11,T_THERMAL /* Set 'rupt code */
683 b .L_exception_entry /* Join common... */
684
685 ;
686 ; Thermal interruption - 64-bit
687 ;
688
689 . = 0x1800
690 .L_handler1800:
691 mtsprg 2,r13 /* Save R13 */
692 mtsprg 3,r11 /* Save R11 */
693 li r11,T_ARCHDEP0 /* Set 'rupt code */
694 b .L_exception_entry /* Join common... */
695
696 /*
697 * There is now a large gap of reserved traps
698 */
699
700 /*
701 * Instrumentation interruption
702 */
703
704 . = 0x2000
705 .L_handler2000:
706 mtsprg 2,r13 /* Save R13 */
707 mtsprg 3,r11 /* Save R11 */
708 li r11,T_INSTRUMENTATION /* Set 'rupt code */
709 b .L_exception_entry /* Join common... */
710
711 . = 0x2100
712
713 /*
714 * Filter Ultra Fast Path syscalls for VMM
715 */
716 ufpVM:
717 cmpwi cr2,r0,0x6004 ; Is it vmm_dispatch
718 bne cr2,notufp ; Exit if not
719 cmpwi cr5,r3,kvmmResumeGuest ; Compare r3 with kvmmResumeGuest
720 cmpwi cr2,r3,kvmmSetGuestRegister ; Compare r3 with kvmmSetGuestRegister
721 cror cr1_eq,cr5_lt,cr2_gt ; Set true if out of VMM Fast syscall range
722 bt- cr1_eq,notufp ; Exit if out of range
723 b EXT(vmm_ufp) ; Ultra Fast Path syscall
724
725 /*
726 * .L_exception_entry(type)
727 *
728 * This is the common exception handling routine called by any
729 * type of system exception.
730 *
731 * ENTRY: via a system exception handler, thus interrupts off, VM off.
732 * r13 has been saved in sprg2 and r11 in sprg3; r11 contains a number
733 * representing the exception's origin
734 *
735 */
736
737 .data
738 .align ALIGN
739 .globl EXT(exception_entry)
740 EXT(exception_entry):
741 .long .L_exception_entry-EXT(ExceptionVectorsStart) /* phys addr of fn */
742
743 VECTOR_SEGMENT
744 .align 5
745
746 .L_exception_entry:
747
748 /*
749 *
750 * Here we will save off a mess of registers, the special ones and R0-R12. We use the DCBZ
751 * instruction to clear and allocate a line in the cache. This way we won't take any cache
752 * misses, so these stores won't take all that long. The exception is the first line, because
753 * we can't do a DCBZ if the L1 D-cache is off. The rest we will also skip if the cache is
754 * off.
755 *
756 * Note that if we are attempting to sleep (as opposed to nap or doze) all interruptions
757 * are ignored.
758 */
759
760
761 .globl EXT(extPatch32)
762
763
764 LEXT(extPatch32)
765 b extEntry64 ; Go do 64-bit (patched out for 32-bit)
766 mfsprg r13,0 ; Load per_proc
767 lwz r13,next_savearea+4(r13) ; Get the exception save area
768 stw r0,saver0+4(r13) ; Save register 0
769 stw r1,saver1+4(r13) ; Save register 1
770
771 mfspr r1,hid0 ; Get HID0
772 mfcr r0 ; Save the whole CR
773
774 mtcrf 0x20,r1 ; Get set to test for sleep
775 cror doze,doze,nap ; Remember if we are napping
776 bf sleep,notsleep ; Skip if we are not trying to sleep
777
778 mtcrf 0x20,r0 ; Restore the CR
779 lwz r0,saver0+4(r13) ; Restore R0
780 lwz r1,saver1+4(r13) ; Restore R1
781 mfsprg r13,0 ; Get the per_proc
782 lwz r11,pfAvailable(r13) ; Get back the feature flags
783 mfsprg r13,2 ; Restore R13
784 mtsprg 2,r11 ; Set sprg2 to the features
785 mfsprg r11,3 ; Restore R11
786 rfi ; Jump back into sleep code...
787 .long 0 ; Leave these here please...
788 .long 0
789 .long 0
790 .long 0
791 .long 0
792 .long 0
793 .long 0
794 .long 0
795
796
797 ;
798 ; This is the 32-bit context saving stuff
799 ;
800
801 .align 5
802
803 notsleep: stw r2,saver2+4(r13) ; Save this one
804 bf doze,notspdo ; Skip the next if we are not napping/dozing...
805 rlwinm r2,r1,0,nap+1,doze-1 ; Clear any possible nap and doze bits
806 mtspr hid0,r2 ; Clear the nap/doze bits
807 notspdo:
808
809 #if INSTRUMENT
810 mfspr r2,pmc1 ; INSTRUMENT - saveinstr[0] - Take earliest possible stamp
811 stw r2,0x6100+(0x00*16)+0x0(0) ; INSTRUMENT - Save it
812 mfspr r2,pmc2 ; INSTRUMENT - Get stamp
813 stw r2,0x6100+(0x00*16)+0x4(0) ; INSTRUMENT - Save it
814 mfspr r2,pmc3 ; INSTRUMENT - Get stamp
815 stw r2,0x6100+(0x00*16)+0x8(0) ; INSTRUMENT - Save it
816 mfspr r2,pmc4 ; INSTRUMENT - Get stamp
817 stw r2,0x6100+(0x00*16)+0xC(0) ; INSTRUMENT - Save it
818 #endif
819
820 la r1,saver4(r13) ; Point to the next line in case we need it
821 crmove wasNapping,doze ; Remember if we were napping
822 mfsprg r2,0 ; Get the per_proc area
823 dcbz 0,r1 ; allocate r4-r7 32-byte line in cache
824
825 ;
826 ; Remember, we are setting up CR6 with feature flags
827 ;
828 andi. r1,r11,T_FAM ; Check FAM bit
829
830 stw r3,saver3+4(r13) ; Save this one
831 stw r4,saver4+4(r13) ; Save this one
832 andc r11,r11,r1 ; Clear FAM bit
833 beq+ noFAM ; Is it FAM intercept
834 mfsrr1 r3 ; Load srr1
835 rlwinm. r3,r3,0,MSR_PR_BIT,MSR_PR_BIT ; Are we trapping from supervisor state?
836 beq+ noFAM ; From supervisor state
837 lwz r1,spcFlags(r2) ; Load spcFlags
838 rlwinm r1,r1,1+FamVMmodebit,30,31 ; Extract FamVMenabit and FamVMmodebit
839 cmpwi cr0,r1,2 ; Check FamVMena set without FamVMmode
840 bne+ noFAM ; Can this context be FAM intercept
841 lwz r4,FAMintercept(r2) ; Load exceptions mask to intercept
842 srwi r1,r11,2 ; divide r11 by 4
843 lis r3,0x8000 ; Set r3 to 0x80000000
844 srw r1,r3,r1 ; Set bit for current exception
845 and. r1,r1,r4 ; And current exception with the intercept mask
846 beq+ noFAM ; Is it FAM intercept
847 b EXT(vmm_fam_exc)
848 noFAM:
849 lwz r1,pfAvailable(r2) ; Get the CPU features flags
850 la r3,saver8(r13) ; Point to line with r8-r11
851 mtcrf 0xE2,r1 ; Put the features flags (that we care about) in the CR
852 dcbz 0,r3 ; allocate r8-r11 32-byte line in cache
853 la r3,saver12(r13) ; point to r12-r15 line
854 lis r4,hi16(MASK(MSR_VEC)|MASK(MSR_FP)|MASK(MSR_ME)) ; Set up the MSR we will use throughout. Note that ME comes on here if MCK
855 stw r6,saver6+4(r13) ; Save this one
856 ori r4,r4,lo16(MASK(MSR_VEC)|MASK(MSR_FP)|MASK(MSR_ME)) ; Rest of MSR
857 stw r8,saver8+4(r13) ; Save this one
858 crmove featAltivec,pfAltivecb ; Set the Altivec flag
859 mtmsr r4 ; Set MSR
860 isync
861 mfsrr0 r6 ; Get the interruption SRR0
862 la r8,savesrr0(r13) ; point to line with SRR0, SRR1, CR, XER, and LR
863 dcbz 0,r3 ; allocate r12-r15 32-byte line in cache
864 la r3,saver16(r13) ; point to next line
865 dcbz 0,r8 ; allocate 32-byte line with SRR0, SRR1, CR, XER, and LR
866 stw r7,saver7+4(r13) ; Save this one
867 lhz r8,PP_CPU_FLAGS(r2) ; Get the flags
868 mfsrr1 r7 ; Get the interrupt SRR1
869 rlwinm r8,r8,(((31-MSR_BE_BIT)+(traceBEb+16+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Set BE bit if special trace is on
870 stw r6,savesrr0+4(r13) ; Save the SRR0
871 rlwinm r6,r7,(((31-MSR_BE_BIT)+(MSR_PR_BIT+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Move PR bit to BE bit
872 stw r5,saver5+4(r13) ; Save this one
873 and r8,r6,r8 ; Remove BE bit only if problem state and special tracing on
874 mfsprg r6,2 ; Get interrupt time R13
875 mtsprg 2,r1 ; Set the feature flags
876 andc r7,r7,r8 ; Clear BE bit if special trace is on and PR is set
877 mfsprg r8,3 ; Get rupt time R11
878 stw r7,savesrr1+4(r13) ; Save SRR1
879 stw r8,saver11+4(r13) ; Save rupt time R11
880 stw r6,saver13+4(r13) ; Save rupt R13
881 dcbz 0,r3 ; allocate 32-byte line with r16-r19
882 la r3,saver20(r13) ; point to next line
883
884 getTB: mftbu r6 ; Get the upper timebase
885 mftb r7 ; Get the lower timebase
886 mftbu r8 ; Get the upper one again
887 cmplw r6,r8 ; Did the top tick?
888 bne- getTB ; Yeah, need to get it again...
889
890 #if INSTRUMENT
891 mfspr r6,pmc1 ; INSTRUMENT - saveinstr[1] - Save halfway context save stamp
892 stw r6,0x6100+(0x01*16)+0x0(0) ; INSTRUMENT - Save it
893 mfspr r6,pmc2 ; INSTRUMENT - Get stamp
894 stw r6,0x6100+(0x01*16)+0x4(0) ; INSTRUMENT - Save it
895 mfspr r6,pmc3 ; INSTRUMENT - Get stamp
896 stw r6,0x6100+(0x01*16)+0x8(0) ; INSTRUMENT - Save it
897 mfspr r6,pmc4 ; INSTRUMENT - Get stamp
898 stw r6,0x6100+(0x01*16)+0xC(0) ; INSTRUMENT - Save it
899 #endif
900
901 stw r8,ruptStamp(r2) ; Save the top of time stamp
902 stw r8,SAVtime(r13) ; Save the top of time stamp
903 stw r7,ruptStamp+4(r2) ; Save the bottom of time stamp
904 stw r7,SAVtime+4(r13) ; Save the bottom of time stamp
905
906 dcbz 0,r3 ; allocate 32-byte line with r20-r23
907 stw r9,saver9+4(r13) ; Save this one
908
909 stw r10,saver10+4(r13) ; Save this one
910 mflr r4 ; Get the LR
911 mfxer r10 ; Get the XER
912
913 bf+ wasNapping,notNapping ; Skip if not waking up from nap...
914
915 lwz r6,napStamp+4(r2) ; Pick up low order nap stamp
916 lis r3,hi16(EXT(machine_idle_ret)) ; Get high part of nap/doze return
917 lwz r5,napStamp(r2) ; and high order
918 subfc r7,r6,r7 ; Subtract low stamp from now
919 lwz r6,napTotal+4(r2) ; Pick up low total
920 subfe r5,r5,r8 ; Subtract high stamp and borrow from now
921 lwz r8,napTotal(r2) ; Pick up the high total
922 addc r6,r6,r7 ; Add low to total
923 ori r3,r3,lo16(EXT(machine_idle_ret)) ; Get low part of nap/doze return
924 adde r8,r8,r5 ; Add high and carry to total
925 stw r6,napTotal+4(r2) ; Save the low total
926 stw r8,napTotal(r2) ; Save the high total
927 stw r3,savesrr0+4(r13) ; Modify to return to nap/doze exit
928
929 rlwinm. r3,r1,0,pfSlowNapb,pfSlowNapb ; Should HID1 be restored?
930 beq notInSlowNap
931
932 lwz r3,pfHID1(r2) ; Get saved HID1 value
933 mtspr hid1,r3 ; Restore HID1
934
935 notInSlowNap:
936 rlwinm. r3,r1,0,pfNoL2PFNapb,pfNoL2PFNapb ; Should MSSCR0 be restored?
937 beq notNapping
938
939 lwz r3,pfMSSCR0(r2) ; Get saved MSSCR0 value
940 mtspr msscr0,r3 ; Restore MSSCR0
941 sync
942 isync
943
944 notNapping: stw r12,saver12+4(r13) ; Save this one
945
946 stw r14,saver14+4(r13) ; Save this one
947 stw r15,saver15+4(r13) ; Save this one
948 la r14,saver24(r13) ; Point to the next block to save into
949 mfctr r6 ; Get the CTR
950 stw r16,saver16+4(r13) ; Save this one
951 la r15,savectr(r13) ; point to line with CTR, DAR, DSISR, Exception code, and VRSAVE
952 stw r4,savelr+4(r13) ; Save rupt LR
953
954 dcbz 0,r14 ; allocate 32-byte line with r24-r27
955 la r16,saver28(r13) ; point to line with r28-r31
956 dcbz 0,r15 ; allocate line with CTR, DAR, DSISR, Exception code, and VRSAVE
957 stw r17,saver17+4(r13) ; Save this one
958 stw r18,saver18+4(r13) ; Save this one
959 stw r6,savectr+4(r13) ; Save rupt CTR
960 stw r0,savecr(r13) ; Save rupt CR
961 stw r19,saver19+4(r13) ; Save this one
962 mfdar r6 ; Get the rupt DAR
963 stw r20,saver20+4(r13) ; Save this one
964 dcbz 0,r16 ; allocate 32-byte line with r28-r31
965
966 stw r21,saver21+4(r13) ; Save this one
967 lwz r21,spcFlags(r2) ; Get the special flags from per_proc
968 stw r10,savexer+4(r13) ; Save the rupt XER
969 stw r30,saver30+4(r13) ; Save this one
970 lhz r30,pfrptdProc(r2) ; Get the reported processor type
971 stw r31,saver31+4(r13) ; Save this one
972 stw r22,saver22+4(r13) ; Save this one
973 stw r23,saver23+4(r13) ; Save this one
974 stw r24,saver24+4(r13) ; Save this one
975 stw r25,saver25+4(r13) ; Save this one
976 mfdsisr r7 ; Get the rupt DSISR
977 stw r26,saver26+4(r13) ; Save this one
978 stw r27,saver27+4(r13) ; Save this one
979 andis. r21,r21,hi16(perfMonitor) ; Is the performance monitor enabled?
980 stw r28,saver28+4(r13) ; Save this one
981 cmpwi cr1,r30,CPU_SUBTYPE_POWERPC_750 ; G3?
982 la r27,savevscr(r13) ; point to 32-byte line with VSCR and FPSCR
983 cmpwi cr2,r30,CPU_SUBTYPE_POWERPC_7400 ; This guy?
984 stw r29,saver29+4(r13) ; Save R29
985 stw r6,savedar+4(r13) ; Save the rupt DAR
986 li r10,savepmc ; Point to pmc savearea
987
988 beq+ noPerfMonSave32 ; No perfmon on here...
989
990 dcbz r10,r13 ; Clear first part of pmc area
991 li r10,savepmc+0x20 ; Point to pmc savearea second part
992 li r22,0 ; r22: zero
993 dcbz r10,r13 ; Clear second part of pmc area
994
995 beq cr1,perfMonSave32_750 ; This is a G3...
996
997 beq cr2,perfMonSave32_7400 ; Regular olde G4...
998
999 mfspr r24,pmc5 ; Here for a 7450
1000 mfspr r25,pmc6
1001 stw r24,savepmc+16(r13) ; Save PMC5
1002 stw r25,savepmc+20(r13) ; Save PMC6
1003 mtspr pmc5,r22 ; Leave PMC5 clear
1004 mtspr pmc6,r22 ; Leave PMC6 clear
1005
1006 perfMonSave32_7400:
1007 mfspr r25,mmcr2
1008 stw r25,savemmcr2+4(r13) ; Save MMCR2
1009 mtspr mmcr2,r22 ; Leave MMCR2 clear
1010
1011 perfMonSave32_750:
1012 mfspr r23,mmcr0
1013 mfspr r24,mmcr1
1014 stw r23,savemmcr0+4(r13) ; Save MMCR0
1015 stw r24,savemmcr1+4(r13) ; Save MMCR1
1016 mtspr mmcr0,r22 ; Leave MMCR0 clear
1017 mtspr mmcr1,r22 ; Leave MMCR1 clear
1018 mfspr r23,pmc1
1019 mfspr r24,pmc2
1020 mfspr r25,pmc3
1021 mfspr r26,pmc4
1022 stw r23,savepmc+0(r13) ; Save PMC1
1023 stw r24,savepmc+4(r13) ; Save PMC2
1024 stw r25,savepmc+8(r13) ; Save PMC3
1025 stw r26,savepmc+12(r13) ; Save PMC4
1026 mtspr pmc1,r22 ; Leave PMC1 clear
1027 mtspr pmc2,r22 ; Leave PMC2 clear
1028 mtspr pmc3,r22 ; Leave PMC3 clear
1029 mtspr pmc4,r22 ; Leave PMC4 clear
1030
1031 noPerfMonSave32:
1032 dcbz 0,r27 ; allocate line with VSCR and FPSCR
1033
1034 stw r7,savedsisr(r13) ; Save the rupt code DSISR
1035 stw r11,saveexception(r13) ; Save the exception code
1036
1037
1038 ;
1039 ; Everything is saved at this point except for the FPRs and VMX registers.
1040 ; Time for us to get a new savearea and then trace the interrupt if tracing is enabled.
1041 ;
1042
1043 lwz r25,traceMask(0) ; Get the trace mask
1044 li r0,SAVgeneral ; Get the savearea type value
1045 lhz r19,PP_CPU_NUMBER(r2) ; Get the logical processor number
1046 rlwinm r22,r11,30,0,31 ; Divide interrupt code by 2
1047 stb r0,SAVflags+2(r13) ; Mark valid context
1048 addi r22,r22,10 ; Adjust code so we shift into CR5
1049 li r23,trcWork ; Get the trace work area address
1050 rlwnm r7,r25,r22,22,22 ; Set CR5_EQ bit position to 0 if tracing allowed
1051 li r26,0x8 ; Get start of cpu mask
1052 srw r26,r26,r19 ; Get bit position of cpu number
1053 mtcrf 0x04,r7 ; Set CR5 to show trace or not
1054 and. r26,r26,r25 ; See if we trace this cpu
1055 crandc cr5_eq,cr5_eq,cr0_eq ; Turn off tracing if cpu is disabled
1056 ;
1057 ; At this point, we can take another exception and lose nothing.
1058 ;
1059
1060 #if INSTRUMENT
1061 mfspr r26,pmc1 ; INSTRUMENT - saveinstr[2] - Take stamp after save is done
1062 stw r26,0x6100+(0x02*16)+0x0(0) ; INSTRUMENT - Save it
1063 mfspr r26,pmc2 ; INSTRUMENT - Get stamp
1064 stw r26,0x6100+(0x02*16)+0x4(0) ; INSTRUMENT - Save it
1065 mfspr r26,pmc3 ; INSTRUMENT - Get stamp
1066 stw r26,0x6100+(0x02*16)+0x8(0) ; INSTRUMENT - Save it
1067 mfspr r26,pmc4 ; INSTRUMENT - Get stamp
1068 stw r26,0x6100+(0x02*16)+0xC(0) ; INSTRUMENT - Save it
1069 #endif
1070
1071 bne+ cr5,xcp32xit ; Skip all of this if no tracing here...
1072
1073 ;
1074 ; We select a trace entry using a compare and swap on the next entry field.
1075 ; Since we do not lock the actual trace buffer, there is a potential that
1076 ; another processor could wrap around and trash our entry. Who cares?
1077 ;
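; (lwarx/stwcx. below are the PowerPC load-reserve/store-conditional pair; the loop
; simply retries the bump of the next-entry pointer if another processor got there first.)
;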
1078
1079 lwz r25,traceStart(0) ; Get the start of trace table
1080 lwz r26,traceEnd(0) ; Get end of trace table
1081
1082 trcsel: lwarx r20,0,r23 ; Get and reserve the next slot to allocate
1083
1084 addi r22,r20,LTR_size ; Point to the next trace entry
1085 cmplw r22,r26 ; Do we need to wrap the trace table?
1086 bne+ gotTrcEnt ; No wrap, we got us a trace entry...
1087
1088 mr r22,r25 ; Wrap back to start
1089
1090 gotTrcEnt: stwcx. r22,0,r23 ; Try to update the current pointer
1091 bne- trcsel ; Collision, try again...
1092
1093 #if ESPDEBUG
1094 dcbf 0,r23 ; Force to memory
1095 sync
1096 #endif
1097
1098 dcbz 0,r20 ; Clear and allocate first trace line
1099
1100 ;
1101 ; Let us cut that trace entry now.
1102 ;
1103
1104 lwz r16,ruptStamp(r2) ; Get top of time base
1105 lwz r17,ruptStamp+4(r2) ; Get the bottom of time stamp
1106
1107 li r14,32 ; Offset to second line
1108
1109 lwz r0,saver0+4(r13) ; Get back interrupt time R0
1110 lwz r1,saver1+4(r13) ; Get back interrupt time R1
1111 lwz r8,savecr(r13) ; Get the CR value
1112
1113 dcbz r14,r20 ; Zap the second line
1114
1115 sth r19,LTR_cpu(r20) ; Stash the cpu number
1116 li r14,64 ; Offset to third line
1117 sth r11,LTR_excpt(r20) ; Save the exception type
1118 lwz r7,saver2+4(r13) ; Get back interrupt time R2
1119 lwz r3,saver3+4(r13) ; Restore this one
1120
1121 dcbz r14,r20 ; Zap the third line
1122
1123 mfdsisr r9 ; Get the DSISR
1124 li r14,96 ; Offset to fourth line
1125 stw r16,LTR_timeHi(r20) ; Set the upper part of TB
1126 stw r17,LTR_timeLo(r20) ; Set the lower part of TB
1127 lwz r10,savelr+4(r13) ; Get the LR
1128 mfsrr0 r17 ; Get SRR0 back, it is still good
1129
1130 dcbz r14,r20 ; Zap the fourth line
1131 lwz r4,saver4+4(r13) ; Restore this one
1132 lwz r5,saver5+4(r13) ; Restore this one
1133 mfsrr1 r18 ; SRR1 is still good in here
1134
1135 stw r8,LTR_cr(r20) ; Save the CR
1136 lwz r6,saver6+4(r13) ; Get R6
1137 mfdar r16 ; Get this back
1138 stw r9,LTR_dsisr(r20) ; Save the DSISR
1139 stw r17,LTR_srr0+4(r20) ; Save the SRR0
1140
1141 stw r18,LTR_srr1+4(r20) ; Save the SRR1
1142 stw r16,LTR_dar+4(r20) ; Save the DAR
1143 mfctr r17 ; Get the CTR (still good in register)
1144 stw r13,LTR_save+4(r20) ; Save the savearea
1145 stw r10,LTR_lr+4(r20) ; Save the LR
1146
1147 stw r17,LTR_ctr+4(r20) ; Save off the CTR
1148 stw r0,LTR_r0+4(r20) ; Save off register 0
1149 stw r1,LTR_r1+4(r20) ; Save off register 1
1150 stw r7,LTR_r2+4(r20) ; Save off register 2
1151
1152
1153 stw r3,LTR_r3+4(r20) ; Save off register 3
1154 stw r4,LTR_r4+4(r20) ; Save off register 4
1155 stw r5,LTR_r5+4(r20) ; Save off register 5
1156 stw r6,LTR_r6+4(r20) ; Save off register 6
1157
1158 #if ESPDEBUG
1159 addi r17,r20,32 ; Second line
1160 addi r16,r20,64 ; Third line
1161 dcbst br0,r20 ; Force to memory
1162 dcbst br0,r17 ; Force to memory
1163 addi r17,r17,32 ; Fourth line
1164 dcbst br0,r16 ; Force to memory
1165 dcbst br0,r17 ; Force to memory
1166
1167 sync ; Make sure it all goes
1168 #endif
1169 xcp32xit: mr r14,r11 ; Save the interrupt code across the call
1170 bl EXT(save_get_phys_32) ; Grab a savearea
1171 mfsprg r2,0 ; Get the per_proc info
1172 li r10,emfp0 ; Point to floating point save
1173 mr r11,r14 ; Get the exception code back
1174 dcbz r10,r2 ; Clear for speed
1175 stw r3,next_savearea+4(r2) ; Store the savearea for the next rupt
1176
1177 #if INSTRUMENT
1178 mfspr r4,pmc1 ; INSTRUMENT - saveinstr[3] - Take stamp after next savearea
1179 stw r4,0x6100+(0x03*16)+0x0(0) ; INSTRUMENT - Save it
1180 mfspr r4,pmc2 ; INSTRUMENT - Get stamp
1181 stw r4,0x6100+(0x03*16)+0x4(0) ; INSTRUMENT - Save it
1182 mfspr r4,pmc3 ; INSTRUMENT - Get stamp
1183 stw r4,0x6100+(0x03*16)+0x8(0) ; INSTRUMENT - Save it
1184 mfspr r4,pmc4 ; INSTRUMENT - Get stamp
1185 stw r4,0x6100+(0x03*16)+0xC(0) ; INSTRUMENT - Save it
1186 #endif
1187 b xcpCommon ; Go join the common interrupt processing...
1188
1189 ;
1190 ;
1191 ; This is the 64-bit context saving stuff
1192 ;
1193
1194 .align 5
1195
1196 extEntry64: mfsprg r13,0 ; Load per_proc
1197 ld r13,next_savearea(r13) ; Get the exception save area
1198 std r0,saver0(r13) ; Save register 0
1199 lis r0,hi16(MASK(MSR_VEC)|MASK(MSR_FP)|MASK(MSR_ME)) ; Set up the MSR we will use throughout. Note that ME comes on here if MCK
1200 std r1,saver1(r13) ; Save register 1
1201 ori r1,r0,lo16(MASK(MSR_VEC)|MASK(MSR_FP)|MASK(MSR_ME)) ; Rest of MSR
1202 lis r0,0x0010 ; Get rupt code transform validity mask
1203 mtmsr r1 ; Set MSR
1204 isync
1205
1206 ori r0,r0,0x0200 ; Get rupt code transform validity mask
1207 std r2,saver2(r13) ; Save this one
1208 lis r1,0x00F0 ; Top half of xform XOR
1209 rlwinm r2,r11,29,27,31 ; Get high 5 bits of rupt code
1210 std r3,saver3(r13) ; Save this one
1211 slw r0,r0,r2 ; Move transform validity bit to bit 0
1212 std r4,saver4(r13) ; Save this one
1213 std r5,saver5(r13) ; Save this one
1214 ori r1,r1,0x04EC ; Bottom half of xform XOR
1215 mfxer r5 ; Save the XER because we are about to muck with it
1216 rlwinm r4,r11,1,27,28 ; Get bottom of interrupt code * 8
1217 lis r3,hi16(dozem|napm) ; Get the nap and doze bits
1218 srawi r0,r0,31 ; Get 0xFFFFFFFF if xform valid, 0 otherwise
1219 rlwnm r4,r1,r4,24,31 ; Extract the xform XOR
1220 li r1,saver16 ; Point to the next line
1221 and r4,r4,r0 ; Only keep transform if we are to use it
1222 li r2,lgKillResv ; Point to the killing field
1223 mfcr r0 ; Save the CR
1224 stwcx. r2,0,r2 ; Kill any pending reservation
1225 dcbz128 r1,r13 ; Blow away the line
1226 sldi r3,r3,32 ; Position it
1227 mfspr r1,hid0 ; Get HID0
1228 andc r3,r1,r3 ; Clear nap and doze
1229 xor r11,r11,r4 ; Transform 970 rupt code to standard keeping FAM bit
1230 cmpld r3,r1 ; See if nap and/or doze was on
1231 std r6,saver6(r13) ; Save this one
1232 mfsprg r2,0 ; Get the per_proc area
1233 la r6,savesrr0(r13) ; point to line with SRR0, SRR1, CR, XER, and LR
1234 beq++ eE64NoNap ; No nap here, skip all this...
1235
1236 sync ; Make sure we are clean
1237 mtspr hid0,r3 ; Set the updated hid0
1238 mfspr r1,hid0 ; Yes, this is silly, keep it here
1239 mfspr r1,hid0 ; Yes, this is a duplicate, keep it here
1240 mfspr r1,hid0 ; Yes, this is a duplicate, keep it here
1241 mfspr r1,hid0 ; Yes, this is a duplicate, keep it here
1242 mfspr r1,hid0 ; Yes, this is a duplicate, keep it here
1243 mfspr r1,hid0 ; Yes, this is a duplicate, keep it here
1244
1245 eE64NoNap: crnot wasNapping,cr0_eq ; Remember if we were napping
1246 andi. r1,r11,T_FAM ; Check FAM bit
1247 beq++ eEnoFAM ; Is it FAM intercept
1248 mfsrr1 r3 ; Load srr1
1249 andc r11,r11,r1 ; Clear FAM bit
1250 rlwinm. r3,r3,0,MSR_PR_BIT,MSR_PR_BIT ; Are we trapping from supervisor state?
1251 beq+ eEnoFAM ; From supervisor state
1252 lwz r1,spcFlags(r2) ; Load spcFlags
1253 rlwinm r1,r1,1+FamVMmodebit,30,31 ; Extract FamVMenabit and FamVMmodebit
1254 cmpwi cr0,r1,2 ; Check FamVMena set without FamVMmode
1255 bne++ eEnoFAM ; Can this context be FAM intercept
1256 lwz r4,FAMintercept(r2) ; Load exceptions mask to intercept
1257 li r3,0 ; Clear
1258 srwi r1,r11,2 ; divide r11 by 4
1259 oris r3,r3,0x8000 ; Set r3 to 0x80000000
1260 srw r1,r3,r1 ; Set bit for current exception
1261 and. r1,r1,r4 ; And current exception with the intercept mask
1262 beq++ eEnoFAM ; Is it FAM intercept
1263 b EXT(vmm_fam_exc)
1264
1265 .align 5
1266
1267 eEnoFAM: lwz r1,pfAvailable(r2) ; Get the CPU features flags
1268 dcbz128 0,r6 ; allocate 128-byte line with SRR0, SRR1, CR, XER, and LR
1269
1270 ;
1271 ; Remember, we are setting up CR6 with feature flags
1272 ;
1273 std r7,saver7(r13) ; Save this one
1274 mtcrf 0x80,r1 ; Put the features flags (that we care about) in the CR
1275 std r8,saver8(r13) ; Save this one
1276 mtcrf 0x40,r1 ; Put the features flags (that we care about) in the CR
1277 mfsrr0 r6 ; Get the interruption SRR0
1278 lhz r8,PP_CPU_FLAGS(r2) ; Get the flags
1279 mtcrf 0x20,r1 ; Put the features flags (that we care about) in the CR
1280 mfsrr1 r7 ; Get the interrupt SRR1
1281 rlwinm r8,r8,(((31-MSR_BE_BIT)+(traceBEb+16+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Set BE bit if special trace is on
1282 std r6,savesrr0(r13) ; Save the SRR0
1283 mtcrf 0x02,r1 ; Put the features flags (that we care about) in the CR
1284 rlwinm r6,r7,(((31-MSR_BE_BIT)+(MSR_PR_BIT+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Move PR bit to BE bit
1285 and r8,r6,r8 ; Remove BE bit only if problem state and special tracing on
1286 std r9,saver9(r13) ; Save this one
1287 andc r7,r7,r8 ; Clear BE bit if special trace is on and PR is set
1288 crmove featAltivec,pfAltivecb ; Set the Altivec flag
1289 std r7,savesrr1(r13) ; Save SRR1
1290 mfsprg r9,3 ; Get rupt time R11
1291 std r10,saver10(r13) ; Save this one
1292 mfsprg r6,2 ; Get interrupt time R13
1293 std r9,saver11(r13) ; Save rupt time R11
1294 mtsprg 2,r1 ; Set the feature flags
1295 std r12,saver12(r13) ; Save this one
1296 mflr r4 ; Get the LR
1297 mftb r7 ; Get the timebase
1298 std r6,saver13(r13) ; Save rupt R13
1299 std r7,ruptStamp(r2) ; Save the time stamp
1300 std r7,SAVtime(r13) ; Save the time stamp
1301
1302 bf++ wasNapping,notNappingSF ; Skip if not waking up from nap...
1303
1304 ld r6,napStamp(r2) ; Pick up nap stamp
1305 lis r3,hi16(EXT(machine_idle_ret)) ; Get high part of nap/doze return
1306 sub r7,r7,r6 ; Subtract stamp from now
1307 ld r6,napTotal(r2) ; Pick up total
1308 add r6,r6,r7 ; Add low to total
1309 ori r3,r3,lo16(EXT(machine_idle_ret)) ; Get low part of nap/doze return
1310 std r6,napTotal(r2) ; Save the new total
1311 std r3,savesrr0(r13) ; Modify to return to nap/doze exit
1312
1313 notNappingSF:
1314 std r14,saver14(r13) ; Save this one
1315 std r15,saver15(r13) ; Save this one
1316 stw r0,savecr(r13) ; Save rupt CR
1317 mfctr r6 ; Get the CTR
1318 std r16,saver16(r13) ; Save this one
1319 std r4,savelr(r13) ; Save rupt LR
1320
1321 std r17,saver17(r13) ; Save this one
1322 li r7,savepmc ; Point to pmc area
1323 std r18,saver18(r13) ; Save this one
1324 lwz r17,spcFlags(r2) ; Get the special flags from per_proc
1325 std r6,savectr(r13) ; Save rupt CTR
1326 std r19,saver19(r13) ; Save this one
1327 mfdar r6 ; Get the rupt DAR
1328 std r20,saver20(r13) ; Save this one
1329
1330 dcbz128 r7,r13 ; Clear out the pmc spot
1331
1332 std r21,saver21(r13) ; Save this one
1333 std r5,savexer(r13) ; Save the rupt XER
1334 std r22,saver22(r13) ; Save this one
1335 std r23,saver23(r13) ; Save this one
1336 std r24,saver24(r13) ; Save this one
1337 std r25,saver25(r13) ; Save this one
1338 mfdsisr r7 ; Get the rupt DSISR
1339 std r26,saver26(r13) ; Save this one
1340 andis. r17,r17,hi16(perfMonitor) ; Is the performance monitor enabled?
1341 std r27,saver27(r13) ; Save this one
1342 li r10,emfp0 ; Point to floating point save
1343 std r28,saver28(r13) ; Save this one
1344 la r27,savevscr(r13) ; point to 32-byte line with VSCR and FPSCR
1345 std r29,saver29(r13) ; Save R29
1346 std r30,saver30(r13) ; Save this one
1347 std r31,saver31(r13) ; Save this one
1348 std r6,savedar(r13) ; Save the rupt DAR
1349 stw r7,savedsisr(r13) ; Save the rupt code DSISR
1350 stw r11,saveexception(r13) ; Save the exception code
1351
1352 beq++ noPerfMonSave64 ; Performance monitor not on...
1353
1354 li r22,0 ; r22: zero
1355
1356 mfspr r23,mmcr0_gp
1357 mfspr r24,mmcr1_gp
1358 mfspr r25,mmcra_gp
1359 std r23,savemmcr0(r13) ; Save MMCR0
1360 std r24,savemmcr1(r13) ; Save MMCR1
1361 std r25,savemmcr2(r13) ; Save MMCRA
1362 mtspr mmcr0_gp,r22 ; Leave MMCR0 clear
1363 mtspr mmcr1_gp,r22 ; Leave MMCR1 clear
1364 mtspr mmcra_gp,r22 ; Leave MMCRA clear
1365 mfspr r23,pmc1_gp
1366 mfspr r24,pmc2_gp
1367 mfspr r25,pmc3_gp
1368 mfspr r26,pmc4_gp
1369 stw r23,savepmc+0(r13) ; Save PMC1
1370 stw r24,savepmc+4(r13) ; Save PMC2
1371 stw r25,savepmc+8(r13) ; Save PMC3
1372 stw r26,savepmc+12(r13) ; Save PMC4
1373 mfspr r23,pmc5_gp
1374 mfspr r24,pmc6_gp
1375 mfspr r25,pmc7_gp
1376 mfspr r26,pmc8_gp
1377 stw r23,savepmc+16(r13) ; Save PMC5
1378 stw r24,savepmc+20(r13) ; Save PMC6
1379 stw r25,savepmc+24(r13) ; Save PMC7
1380 stw r26,savepmc+28(r13) ; Save PMC8
1381 mtspr pmc1_gp,r22 ; Leave PMC1 clear
1382 mtspr pmc2_gp,r22 ; Leave PMC2 clear
1383 mtspr pmc3_gp,r22 ; Leave PMC3 clear
1384 mtspr pmc4_gp,r22 ; Leave PMC4 clear
1385 mtspr pmc5_gp,r22 ; Leave PMC5 clear
1386 mtspr pmc6_gp,r22 ; Leave PMC6 clear
1387 mtspr pmc7_gp,r22 ; Leave PMC7 clear
1388 mtspr pmc8_gp,r22 ; Leave PMC8 clear
1389
1390 noPerfMonSave64:
1391
1392 ;
1393 ; Everything is saved at this point except for the FPRs and VMX registers.
1394 ; Time for us to get a new savearea and then trace the interrupt if tracing is enabled.
1395 ;
1396
1397 lwz r25,traceMask(0) ; Get the trace mask
1398 li r0,SAVgeneral ; Get the savearea type value
1399 lhz r19,PP_CPU_NUMBER(r2) ; Get the logical processor number
1400 stb r0,SAVflags+2(r13) ; Mark valid context
1401 ori r23,r23,lo16(EXT(trcWork)) ; Get the rest
1402 rlwinm r22,r11,30,0,31 ; Divide interrupt code by 2
1403 li r23,trcWork ; Get the trace work area address
1404 addi r22,r22,10 ; Adjust code so we shift into CR5
1405 li r26,0x8 ; Get start of cpu mask
1406 rlwnm r7,r25,r22,22,22 ; Set CR5_EQ bit position to 0 if tracing allowed
1407 srw r26,r26,r19 ; Get bit position of cpu number
1408 mtcrf 0x04,r7 ; Set CR5 to show trace or not
1409 and. r26,r26,r25 ; See if we trace this cpu
1410 crandc cr5_eq,cr5_eq,cr0_eq ; Turn off tracing if cpu is disabled
1411
1412 bne++ cr5,xcp64xit ; Skip all of this if no tracing here...
1413
1414 ;
1415 ; We select a trace entry using a compare and swap on the next entry field.
1416 ; Since we do not lock the actual trace buffer, there is a potential that
1417 ; another processor could wrap around and trash our entry. Who cares?
1418 ;
1419
1420 lwz r25,traceStart(0) ; Get the start of trace table
1421 lwz r26,traceEnd(0) ; Get end of trace table
1422
1423 trcselSF: lwarx r20,0,r23 ; Get and reserve the next slot to allocate
1424
1425 addi r22,r20,LTR_size ; Point to the next trace entry
1426 cmplw r22,r26 ; Do we need to wrap the trace table?
1427 bne+ gotTrcEntSF ; No wrap, we got us a trace entry...
1428
1429 mr r22,r25 ; Wrap back to start
1430
1431 gotTrcEntSF:
1432 stwcx. r22,0,r23 ; Try to update the current pointer
1433 bne- trcselSF ; Collision, try again...
1434
1435 #if ESPDEBUG
1436 dcbf 0,r23 ; Force to memory
1437 sync
1438 #endif
1439
1440 ;
1441 ; Let us cut that trace entry now.
1442 ;
1443
1444 dcbz128 0,r20 ; Zap the trace entry
1445
1446 ld r16,ruptStamp(r2) ; Get top of time base
1447 ld r0,saver0(r13) ; Get back interrupt time R0 (we need this whether we trace or not)
1448 std r16,LTR_timeHi(r20) ; Set the upper part of TB
1449 ld r1,saver1(r13) ; Get back interrupt time R1
1450 ld r18,saver2(r13) ; Get back interrupt time R2
1451 std r0,LTR_r0(r20) ; Save off register 0
1452 ld r3,saver3(r13) ; Restore this one
1453 sth r19,LTR_cpu(r20) ; Stash the cpu number
1454 std r1,LTR_r1(r20) ; Save off register 1
1455 ld r4,saver4(r13) ; Restore this one
1456 std r18,LTR_r2(r20) ; Save off register 2
1457 ld r5,saver5(r13) ; Restore this one
1458 ld r6,saver6(r13) ; Get R6
1459 std r3,LTR_r3(r20) ; Save off register 3
1460 lwz r16,savecr(r13) ; Get the CR value
1461 std r4,LTR_r4(r20) ; Save off register 4
1462 mfsrr0 r17 ; Get SRR0 back, it is still good
1463 std r5,LTR_r5(r20) ; Save off register 5
1464 std r6,LTR_r6(r20) ; Save off register 6
1465 mfsrr1 r18 ; SRR1 is still good in here
1466 stw r16,LTR_cr(r20) ; Save the CR
1467 std r17,LTR_srr0(r20) ; Save the SRR0
1468 std r18,LTR_srr1(r20) ; Save the SRR1
1469
1470 mfdar r17 ; Get this back
1471 ld r16,savelr(r13) ; Get the LR
1472 std r17,LTR_dar(r20) ; Save the DAR
1473 mfctr r17 ; Get the CTR (still good in register)
1474 std r16,LTR_lr(r20) ; Save the LR
1475 std r17,LTR_ctr(r20) ; Save off the CTR
1476 mfdsisr r17 ; Get the DSISR
1477 std r13,LTR_save(r20) ; Save the savearea
1478 stw r17,LTR_dsisr(r20) ; Save the DSISR
1479 sth r11,LTR_excpt(r20) ; Save the exception type
1480
1481 #if ESPDEBUG
1482 dcbf 0,r20 ; Force to memory
1483 sync ; Make sure it all goes
1484 #endif
1485 xcp64xit: mr r14,r11 ; Save the interrupt code across the call
1486 bl EXT(save_get_phys_64) ; Grab a savearea
1487 mfsprg r2,0 ; Get the per_proc info
1488 li r10,emfp0 ; Point to floating point save
1489 mr r11,r14 ; Get the exception code back
1490 dcbz128 r10,r2 ; Clear for speed
1491 std r3,next_savearea(r2) ; Store the savearea for the next rupt
1492 b xcpCommon ; Go join the common interrupt processing...
1493
1494 ;
1495 ; All of the context is saved. Now we will get a
1496 ; fresh savearea. After this we can take an interrupt.
1497 ;
1498
1499 .align 5
1500
1501 xcpCommon:
1502
1503 ;
1504 ; Here we will save some floating point and vector status
1505 ; and we also set a clean default status for a new interrupt level.
1506 ; Note that we assume that emfp0 is on an altivec boundary
1507 ; and that R10 points to it (as a displacement from R2).
1508 ;
1509 ; We need to save the FPSCR as if it is normal context.
1510 ; This is because pending exceptions will cause an exception even if
1511 ; FP is disabled. We need to clear the FPSCR when we first start running in the
1512 ; kernel.
1513 ;
1514
1515 stfd f0,emfp0(r2) ; Save FPR0
1516 stfd f1,emfp1(r2) ; Save FPR1
1517 li r19,0 ; Assume no Altivec
1518 mffs f0 ; Get the FPSCR
1519 lfd f1,Zero(0) ; Make a 0
1520 stfd f0,savefpscrpad(r13) ; Save the FPSCR
1521 li r9,0 ; Get set to clear VRSAVE
1522 mtfsf 0xFF,f1 ; Clear it
1523 addi r14,r10,16 ; Displacement to second vector register
1524 lfd f0,emfp0(r2) ; Restore FPR0
1525 la r28,savevscr(r13) ; Point to the status area
1526 lfd f1,emfp1(r2) ; Restore FPR1
1527
1528 bf featAltivec,noavec ; No Altivec on this CPU...
1529
1530 stvxl v0,r10,r2 ; Save a register
1531 stvxl v1,r14,r2 ; Save a second register
1532 mfspr r19,vrsave ; Get the VRSAVE register
1533 mfvscr v0 ; Get the vector status register
1534 vspltish v1,1 ; Turn on the non-Java bit and saturate
1535 stvxl v0,0,r28 ; Save the vector status
1536 vspltisw v0,1 ; Turn on the saturate bit
1537 vxor v1,v1,v0 ; Turn off saturate
1538 mtvscr v1 ; Set the non-java, no saturate status for new level
1539 mtspr vrsave,r9 ; Clear VRSAVE for each interrupt level
1540
1541 lvxl v0,r10,r2 ; Restore first work register
1542 lvxl v1,r14,r2 ; Restore second work register
1543
1544 noavec: stw r19,savevrsave(r13) ; Save the vector register usage flags
1545
1546 ;
1547 ; We are now done saving all of the context. Start filtering the interrupts.
1548 ; Note that a Redrive will count as an actual interrupt.
1549 ; Note also that we take a lot of system calls, so we will start decoding here.
1550 ;
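; (A "redrive" happens when a handler converts one exception into another, for example
; the firmware-call return path at FCReturn below, and branches back to Redrive so the
; new code runs through this same filter.)
;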
1551
1552 Redrive:
1553
1554
1555 #if INSTRUMENT
1556 mfspr r20,pmc1 ; INSTRUMENT - saveinstr[4] - Take stamp before exception filter
1557 stw r20,0x6100+(0x04*16)+0x0(0) ; INSTRUMENT - Save it
1558 mfspr r20,pmc2 ; INSTRUMENT - Get stamp
1559 stw r20,0x6100+(0x04*16)+0x4(0) ; INSTRUMENT - Save it
1560 mfspr r20,pmc3 ; INSTRUMENT - Get stamp
1561 stw r20,0x6100+(0x04*16)+0x8(0) ; INSTRUMENT - Save it
1562 mfspr r20,pmc4 ; INSTRUMENT - Get stamp
1563 stw r20,0x6100+(0x04*16)+0xC(0) ; INSTRUMENT - Save it
1564 #endif
1565 lwz r22,SAVflags(r13) ; Pick up the flags
1566 lwz r0,saver0+4(r13) ; Get back interrupt time syscall number
1567 mfsprg r2,0 ; Restore per_proc
1568
1569 li r20,lo16(xcpTable) ; Point to the vector table (note: this must be in 1st 64k of physical memory)
1570 la r12,hwCounts(r2) ; Point to the exception count area
1571 rlwinm r22,r22,SAVredriveb+1,31,31 ; Get a 1 if we are redriving
1572 add r12,r12,r11 ; Point to the count
1573 lwzx r20,r20,r11 ; Get the interrupt handler
1574 lwz r25,0(r12) ; Get the old value
1575 lwz r23,hwRedrives(r2) ; Get the redrive count
1576 xori r24,r22,1 ; Get the NOT of the redrive
1577 mtctr r20 ; Point to the interrupt handler
1578 mtcrf 0x80,r0 ; Set our CR0 to the high nybble of possible syscall code
1579 add r25,r25,r24 ; Count this one if not a redrive
1580 add r23,r23,r24 ; Count this one if it is a redrive
1581 crandc cr0_lt,cr0_lt,cr0_gt ; See if we have R0 equal to 0b10xx...x
1582 stw r25,0(r12) ; Store it back
1583 stw r23,hwRedrives(r2) ; Save the redrive count
1584 bctr ; Go process the exception...
1585
1586
1587 ;
1588 ; Exception vector filter table
1589 ;
1590
1591 .align 7
1592
1593 xcpTable:
1594 .long EatRupt ; T_IN_VAIN
1595 .long PassUpTrap ; T_RESET
1596 .long MachineCheck ; T_MACHINE_CHECK
1597 .long EXT(handlePF) ; T_DATA_ACCESS
1598 .long EXT(handlePF) ; T_INSTRUCTION_ACCESS
1599 .long PassUpRupt ; T_INTERRUPT
1600 .long EXT(AlignAssist) ; T_ALIGNMENT
1601 .long EXT(Emulate) ; T_PROGRAM
1602 .long PassUpFPU ; T_FP_UNAVAILABLE
1603 .long PassUpRupt ; T_DECREMENTER
1604 .long PassUpTrap ; T_IO_ERROR
1605 .long PassUpTrap ; T_RESERVED
1606 .long xcpSyscall ; T_SYSTEM_CALL
1607 .long PassUpTrap ; T_TRACE
1608 .long PassUpTrap ; T_FP_ASSIST
1609 .long PassUpTrap ; T_PERF_MON
1610 .long PassUpVMX ; T_VMX
1611 .long PassUpTrap ; T_INVALID_EXCP0
1612 .long PassUpTrap ; T_INVALID_EXCP1
1613 .long PassUpTrap ; T_INVALID_EXCP2
1614 .long PassUpTrap ; T_INSTRUCTION_BKPT
1615 .long PassUpRupt ; T_SYSTEM_MANAGEMENT
1616 .long EXT(AltivecAssist) ; T_ALTIVEC_ASSIST
1617 .long PassUpRupt ; T_THERMAL
1618 .long PassUpTrap ; T_INVALID_EXCP5
1619 .long PassUpTrap ; T_INVALID_EXCP6
1620 .long PassUpTrap ; T_INVALID_EXCP7
1621 .long PassUpTrap ; T_INVALID_EXCP8
1622 .long PassUpTrap ; T_INVALID_EXCP9
1623 .long PassUpTrap ; T_INVALID_EXCP10
1624 .long PassUpTrap ; T_INVALID_EXCP11
1625 .long PassUpTrap ; T_INVALID_EXCP12
1626 .long PassUpTrap ; T_INVALID_EXCP13
1627
1628 .long PassUpTrap ; T_RUNMODE_TRACE
1629
1630 .long PassUpRupt ; T_SIGP
1631 .long PassUpTrap ; T_PREEMPT
1632 .long conswtch ; T_CSWITCH
1633 .long PassUpRupt ; T_SHUTDOWN
1634 .long PassUpAbend ; T_CHOKE
1635
1636 .long EXT(handleDSeg) ; T_DATA_SEGMENT
1637 .long EXT(handleISeg) ; T_INSTRUCTION_SEGMENT
1638
1639 .long WhoaBaby ; T_SOFT_PATCH
1640 .long WhoaBaby ; T_MAINTENANCE
1641 .long WhoaBaby ; T_INSTRUMENTATION
1642
1643 ;
1644 ; Just what the heck happened here????
1645 ;
1646
1647 .align 5
1648
1649 WhoaBaby: b . ; Open the hood and wait for help
1650
1651
1652 ;
1653 ; System call
1654 ;
1655
1656 .align 5
1657
1658 xcpSyscall: lis r20,hi16(EXT(shandler)) ; Assume this is a normal one, get handler address
1659 rlwinm r6,r0,1,0,31 ; Move sign bit to the end
1660 ori r20,r20,lo16(EXT(shandler)) ; Assume this is a normal one, get handler address
1661 bnl++ cr0,PassUp ; R0 not 0b10xxx...x, can not be any kind of magical system call, just pass it up...
1662 lwz r7,savesrr1+4(r13) ; Get the entering MSR (low half)
1663 lwz r1,dgFlags(0) ; Get the flags
1664 cmplwi cr2,r6,1 ; See if original R0 had the CutTrace request code in it
1665
1666 rlwinm. r7,r7,0,MSR_PR_BIT,MSR_PR_BIT ; Did we come from user state?
1667 beq++ FCisok ; From supervisor state...
1668
1669 rlwinm. r1,r1,0,enaUsrFCallb,enaUsrFCallb ; Are they valid?
1670 beq++ PassUp ; No, treat as a normal one...
1671
1672 FCisok: beq++ cr2,EatRupt ; This is a CutTrace system call, we are done with it...
1673
1674 ;
1675 ; Here is where we call the firmware. If it returns T_IN_VAIN, that means
1676 ; that it has handled the interruption. Remember: thou shalt not trash R13
1677 ; while you are away. Anything else is ok.
1678 ;
1679
1680 lwz r3,saver3+4(r13) ; Restore the first parameter
1681 b EXT(FirmwareCall) ; Go handle the firmware call....
1682
1683 ;
1684 ; Here is where we return from the firmware call
1685 ;
1686
1687 .align 5
1688 .globl EXT(FCReturn)
1689
1690 LEXT(FCReturn)
1691 cmplwi r3,T_IN_VAIN ; Was it handled?
1692 beq+ EatRupt ; Interrupt was handled...
1693 mr r11,r3 ; Put the rupt code into the right register
1694 b Redrive ; Go through the filter again...
1695
1696
1697 ;
1698 ; Here is where we return from the PTE miss and segment exception handler
1699 ;
1700
1701 .align 5
1702 .globl EXT(PFSExit)
1703
1704 LEXT(PFSExit)
1705
1706 #if 0
1707 mfsprg r2,0 ; (BRINGUP)
1708 lwz r0,savedsisr(r13) ; (BRINGUP)
1709 andis. r0,r0,hi16(dsiAC) ; (BRINGUP)
1710 beq++ didnthit ; (BRINGUP)
1711 lwz r0,20(0) ; (BRINGUP)
1712 mr. r0,r0 ; (BRINGUP)
1713 bne-- didnthit ; (BRINGUP)
1714 #if 0
1715 li r0,1 ; (BRINGUP)
1716 stw r0,20(0) ; (BRINGUP)
1717 lis r0,hi16(Choke) ; (BRINGUP)
1718 ori r0,r0,lo16(Choke) ; (BRINGUP)
1719 sc ; (BRINGUP)
1720 #endif
1721
1722 lwz r4,savesrr0+4(r13) ; (BRINGUP)
1723 lwz r8,savesrr1+4(r13) ; (BRINGUP)
1724 lwz r6,savedar+4(r13) ; (BRINGUP)
1725 rlwinm. r0,r8,0,MSR_IR_BIT,MSR_IR_BIT ; (BRINGUP)
1726 mfmsr r9 ; (BRINGUP)
1727 ori r0,r9,lo16(MASK(MSR_DR)) ; (BRINGUP)
1728 beq-- hghg ; (BRINGUP)
1729 mtmsr r0 ; (BRINGUP)
1730 isync ; (BRINGUP)
1731
1732 hghg: lwz r5,0(r4) ; (BRINGUP)
1733 beq-- hghg1 ; (BRINGUP)
1734 mtmsr r9 ; (BRINGUP)
1735 isync ; (BRINGUP)
1736
1737 hghg1: rlwinm r7,r5,6,26,31 ; (BRINGUP)
1738 rlwinm r27,r5,14,24,28 ; (BRINGUP)
1739 addi r3,r13,saver0+4 ; (BRINGUP)
1740 lwzx r3,r3,r27 ; (BRINGUP)
1741
1742 #if 0
1743 lwz r27,patcharea+4(r2) ; (BRINGUP)
1744 mr. r3,r3 ; (BRINGUP)
1745 bne++ nbnbnb ; (BRINGUP)
1746 addi r27,r27,1 ; (BRINGUP)
1747 stw r27,patcharea+4(r2) ; (BRINGUP)
1748 nbnbnb:
1749 #endif
1750
1751 rlwinm. r28,r8,0,MSR_DR_BIT,MSR_DR_BIT ; (BRINGUP)
1752 rlwinm r27,r6,0,0,29 ; (BRINGUP)
1753 ori r28,r9,lo16(MASK(MSR_DR)) ; (BRINGUP)
1754 mfspr r10,dabr ; (BRINGUP)
1755 li r0,0 ; (BRINGUP)
1756 mtspr dabr,r0 ; (BRINGUP)
1757 cmplwi cr1,r7,31 ; (BRINGUP)
1758 beq-- qqq0 ; (BRINGUP)
1759 mtmsr r28 ; (BRINGUP)
1760 qqq0:
1761 isync ; (BRINGUP)
1762
1763 lwz r27,0(r27) ; (BRINGUP) - Get original value
1764
1765 bne cr1,qqq1 ; (BRINGUP)
1766
1767 rlwinm r5,r5,31,22,31 ; (BRINGUP)
1768 cmplwi cr1,r5,151 ; (BRINGUP)
1769 beq cr1,qqq3 ; (BRINGUP)
1770 cmplwi cr1,r5,407 ; (BRINGUP)
1771 beq cr1,qqq2 ; (BRINGUP)
1772 cmplwi cr1,r5,215 ; (BRINGUP)
1773 beq cr1,qqq0q ; (BRINGUP)
1774 cmplwi cr1,r5,1014 ; (BRINGUP)
1775 beq cr1,qqqm1 ; (BRINGUP)
1776
1777 lis r0,hi16(Choke) ; (BRINGUP)
1778 ori r0,r0,lo16(Choke) ; (BRINGUP)
1779 sc ; (BRINGUP)
1780
1781 qqqm1: rlwinm r7,r6,0,0,26 ; (BRINGUP)
1782 stw r0,0(r7) ; (BRINGUP)
1783 stw r0,4(r7) ; (BRINGUP)
1784 stw r0,8(r7) ; (BRINGUP)
1785 stw r0,12(r7) ; (BRINGUP)
1786 stw r0,16(r7) ; (BRINGUP)
1787 stw r0,20(r7) ; (BRINGUP)
1788 stw r0,24(r7) ; (BRINGUP)
1789 stw r0,28(r7) ; (BRINGUP)
1790 b qqq9
1791
1792 qqq1: cmplwi r7,38 ; (BRINGUP)
1793 bgt qqq2 ; (BRINGUP)
1794 blt qqq3 ; (BRINGUP)
1795
1796 qqq0q: stb r3,0(r6) ; (BRINGUP)
1797 b qqq9 ; (BRINGUP)
1798
1799 qqq2: sth r3,0(r6) ; (BRINGUP)
1800 b qqq9 ; (BRINGUP)
1801
1802 qqq3: stw r3,0(r6) ; (BRINGUP)
1803
1804 qqq9:
1805 #if 0
1806 rlwinm r7,r6,0,0,29 ; (BRINGUP)
1807 lwz r0,0(r7) ; (BRINGUP) - Get newest value
1808 #else
1809 lis r7,hi16(0x000792B8) ; (BRINGUP)
1810 ori r7,r7,lo16(0x000792B8) ; (BRINGUP)
1811 lwz r0,0(r7) ; (BRINGUP) - Get newest value
1812 #endif
1813 mtmsr r9 ; (BRINGUP)
1814 mtspr dabr,r10 ; (BRINGUP)
1815 isync ; (BRINGUP)
1816
1817 #if 0
1818 lwz r28,patcharea+12(r2) ; (BRINGUP)
1819 mr. r28,r28 ; (BRINGUP)
1820 bne++ qqq12 ; (BRINGUP)
1821 lis r28,0x4000 ; (BRINGUP)
1822
1823 qqq12: stw r27,0(r28) ; (BRINGUP)
1824 lwz r6,savedar+4(r13) ; (BRINGUP)
1825 stw r0,4(r28) ; (BRINGUP)
1826 stw r4,8(r28) ; (BRINGUP)
1827 stw r6,12(r28) ; (BRINGUP)
1828 addi r28,r28,16 ; (BRINGUP)
1829 mr. r3,r3 ; (BRINGUP)
1830 stw r28,patcharea+12(r2) ; (BRINGUP)
1831 lwz r10,patcharea+8(r2) ; (BRINGUP)
1832 lwz r0,patcharea+4(r2) ; (BRINGUP)
1833 #endif
1834
1835 #if 1
1836 stw r0,patcharea(r2) ; (BRINGUP)
1837 #endif
1838
1839 #if 0
1840 xor r28,r0,r27 ; (BRINGUP) - See how much it changed
1841 rlwinm r28,r28,24,24,31 ; (BRINGUP)
1842 cmplwi r28,1 ; (BRINGUP)
1843
1844 ble++ qqq10 ; (BRINGUP)
1845
1846 mr r7,r0 ; (BRINGUP)
1847 li r0,1 ; (BRINGUP)
1848 stw r0,20(0) ; (BRINGUP)
1849 lis r0,hi16(Choke) ; (BRINGUP)
1850 ori r0,r0,lo16(Choke) ; (BRINGUP)
1851 sc ; (BRINGUP)
1852 #endif
1853
1854
1855 qqq10: addi r4,r4,4 ; (BRINGUP)
1856 stw r4,savesrr0+4(r13) ; (BRINGUP)
1857
1858 li r11,T_IN_VAIN ; (BRINGUP)
1859 b EatRupt ; (BRINGUP)
1860
1861 didnthit: ; (BRINGUP)
1862 #endif
1863 #if 0
1864 lwz r0,20(0) ; (BRINGUP)
1865 mr. r0,r0 ; (BRINGUP)
1866 beq++ opopop ; (BRINGUP)
1867 li r0,0 ; (BRINGUP)
1868 stw r0,20(0) ; (BRINGUP)
1869 lis r0,hi16(Choke) ; (BRINGUP)
1870 ori r0,r0,lo16(Choke) ; (BRINGUP)
1871 sc ; (BRINGUP)
1872 opopop:
1873 #endif
1874 lwz r0,savesrr1+4(r13) ; Get the MSR in use at exception time
1875 cmplwi cr1,r11,T_IN_VAIN ; Was it handled?
1876 rlwinm. r4,r0,0,MSR_PR_BIT,MSR_PR_BIT ; Are we trapping from supervisor state?
1877 beq++ cr1,EatRupt ; Yeah, just blast back to the user...
1878 beq-- NoFamPf
1879 mfsprg r2,0 ; Get back per_proc
1880 lwz r1,spcFlags(r2) ; Load spcFlags
1881 rlwinm r1,r1,1+FamVMmodebit,30,31 ; Extract FamVMenabit and FamVMmodebit
1882 cmpi cr0,r1,2 ; Check FamVMena set without FamVMmode
1883 bne-- cr0,NoFamPf
1884 lwz r6,FAMintercept(r2) ; Load exceptions mask to intercept
1885 li r5,0 ; Clear
1886 srwi r1,r11,2 ; divide r11 by 4
1887 oris r5,r5,0x8000 ; Set r5 to 0x80000000
1888 srw r1,r5,r1 ; Set bit for current exception
1889 and. r1,r1,r6 ; And current exception with the intercept mask
1890 			beq++	NoFamPf				; No, not a FAM intercept...
1891 bl EXT(vmm_fam_pf)
1892 b EatRupt
1893
1894 NoFamPf: andi. r4,r0,lo16(MASK(MSR_RI)) ; See if the recover bit is on
1895 lis r0,0x8000 ; Get 0xFFFFFFFF80000000
1896 add r0,r0,r0 ; Get 0xFFFFFFFF00000000
1897 beq++ PassUpTrap ; Not on, normal case...
1898 ;
1899 ; Here is where we handle the "recovery mode" stuff.
1900 ; This is set by an emulation routine to trap any faults when it is fetching data or
1901 ; instructions.
1902 ;
1903 ; If we get a fault, we turn off RI, set CR0_EQ to false, bump the PC, and set R0
1904 ; and R1 to the DAR and DSISR, respectively.
1905 ;
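;
;			A minimal sketch of the consumer side, assumed from the description above
;			rather than taken from this file: the emulation routine presets CR0_EQ,
;			runs the touchy access with MSR[RI] on, and tests CR0 right after it,
;			because a fault skips the access and falls through with CR0_EQ clear:
;
;				crset	cr0_eq			; assume success
;				lwz		r6,0(r5)		; the guarded access (registers are examples only)
;				bne--	emuFault		; CR0_EQ clear: faulted, R0/R1 now hold DAR/DSISR
;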
1906 lwz r3,savesrr0(r13) ; Get the failing instruction address
1907 lwz r4,savesrr0+4(r13) ; Get the failing instruction address
1908 lwz r5,savecr(r13) ; Get the condition register
1909 or r4,r4,r0 ; Fill the high part with foxes
1910 lwz r0,savedar(r13) ; Get the DAR
1911 addic r4,r4,4 ; Skip failing instruction
1912 lwz r6,savedar+4(r13) ; Get the DAR
1913 addze r3,r3 ; Propagate carry
1914 rlwinm r5,r5,0,3,1 ; Clear CR0_EQ to let emulation code know we failed
1915 lwz r7,savedsisr(r13) ; Grab the DSISR
1916 stw r3,savesrr0(r13) ; Save resume address
1917 stw r4,savesrr0+4(r13) ; Save resume address
1918 stw r5,savecr(r13) ; And the resume CR
1919 stw r0,saver0(r13) ; Pass back the DAR
1920 stw r6,saver0+4(r13) ; Pass back the DAR
1921 stw r7,saver1+4(r13) ; Pass back the DSISR
1922 b EatRupt ; Resume emulated code
1923
1924 ;
1925 ; Here is where we handle the context switch firmware call. The old
1926 ;			context has been saved. The new savearea is passed in a slightly hokey way: the high order
1927 ; half is stored in saver7 and the low half is in saver3. We will just
1928 ; muck around with the savearea pointers, and then join the exit routine
1929 ;
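;
;			The physical-to-virtual flip below relies on SACvrswap in the savearea block
;			header being an XOR mask: xor-ing a physical savearea address with it yields
;			the matching virtual address (and vice versa).
;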
1930
1931 .align 5
1932
1933 conswtch:
1934 li r0,0xFFF ; Get page boundary
1935 mr r29,r13 ; Save the save
1936 andc r30,r13,r0 ; Round down to page boundary (64-bit safe)
1937 lwz r5,saver3+4(r13) ; Switch to the new savearea
1938 bf-- pf64Bitb,xcswNo64 ; Not 64-bit...
1939 lwz r6,saver7+4(r13) ; Get the high order half
1940 sldi r6,r6,32 ; Position high half
1941 or r5,r5,r6 ; Merge them
1942
1943 xcswNo64: lwz r30,SACvrswap+4(r30) ; get real to virtual translation
1944 mr r13,r5 ; Switch saveareas
1945 li r0,0 ; Clear this
1946 xor r27,r29,r30 ; Flip to virtual
1947 stw r0,saver3(r5) ; Push the new virtual savearea to the switch to routine
1948 stw r27,saver3+4(r5) ; Push the new virtual savearea to the switch to routine
1949 b EatRupt ; Start it up...
1950
1951 ;
1952 ; Handle machine check here.
1953 ;
1954 ; ?
1955 ;
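;
;			Rough shape of what follows: the 32-bit path only recovers machine checks
;			taken inside ml_probe_read (anything else is passed up), while the 64-bit
;			path (mck64) decodes SRR1/DSISR and reads the SCOM FIR registers to classify
;			the error, then flags the result as recovered (ceMck) or not (ueMck) before
;			passing it up for logging.
;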
1956
1957 .align 5
1958
1959 MachineCheck:
1960
1961 bt++ pf64Bitb,mck64 ; ?
1962
1963 lwz r27,savesrr1+4(r13) ; Pick up srr1
1964
1965 ;
1966 ; Check if the failure was in
1967 ; ml_probe_read. If so, this is expected, so modify the PC to
1968 ;			ml_probe_read_mck and then eat the exception.
1969 ;
1970 lwz r30,savesrr0+4(r13) ; Get the failing PC
1971 lis r28,hi16(EXT(ml_probe_read_mck)) ; High order part
1972 lis r27,hi16(EXT(ml_probe_read)) ; High order part
1973 ori r28,r28,lo16(EXT(ml_probe_read_mck)) ; Get the low part
1974 ori r27,r27,lo16(EXT(ml_probe_read)) ; Get the low part
1975 cmplw r30,r28 ; Check highest possible
1976 cmplw cr1,r30,r27 ; Check lowest
1977 bge- PassUpTrap ; Outside of range
1978 blt- cr1,PassUpTrap ; Outside of range
1979 ;
1980 ; We need to fix up the BATs here because the probe
1981 ; routine messed them all up... As long as we are at it,
1982 ; fix up to return directly to caller of probe.
1983 ;
1984
1985 lis r11,hi16(EXT(shadow_BAT)+shdDBAT) ; Get shadow address
1986 ori r11,r11,lo16(EXT(shadow_BAT)+shdDBAT) ; Get shadow address
1987
1988 lwz r30,0(r11) ; Pick up DBAT 0 high
1989 lwz r28,4(r11) ; Pick up DBAT 0 low
1990 lwz r27,8(r11) ; Pick up DBAT 1 high
1991 lwz r18,16(r11) ; Pick up DBAT 2 high
1992 lwz r11,24(r11) ; Pick up DBAT 3 high
1993
1994 sync
1995 mtdbatu 0,r30 ; Restore DBAT 0 high
1996 mtdbatl 0,r28 ; Restore DBAT 0 low
1997 mtdbatu 1,r27 ; Restore DBAT 1 high
1998 mtdbatu 2,r18 ; Restore DBAT 2 high
1999 mtdbatu 3,r11 ; Restore DBAT 3 high
2000 sync
2001
2002 lwz r28,savelr+4(r13) ; Get return point
2003 lwz r27,saver0+4(r13) ; Get the saved MSR
2004 li r30,0 ; Get a failure RC
2005 stw r28,savesrr0+4(r13) ; Set the return point
2006 stw r27,savesrr1+4(r13) ; Set the continued MSR
2007 stw r30,saver3+4(r13) ; Set return code
2008 b EatRupt ; Yum, yum, eat it all up...
2009
2010 ;
2011 ; 64-bit machine checks
2012 ;
2013
2014 mck64:
2015
2016 ;
2017 ; NOTE: WE NEED TO RETHINK RECOVERABILITY A BIT - radar 3167190
2018 ;
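;
;			The SCOM access pattern used repeatedly below: write (register | 0x8000) to
;			SCOMC to request a read and pull the data from SCOMD; to clear a FIR, write
;			the AND mask (0 here) to SCOMD and the register's reset address to SCOMC.
;			The trailing read of SCOMC just fetches status that we ignore.
;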
2019
2020 ld r23,savesrr0(r13) ; Grab the SRR0 in case we need bad instruction
2021 ld r20,savesrr1(r13) ; Grab the SRR1 so we can decode the thing
2022 lwz r21,savedsisr(r13) ; We might need this in a bit
2023 ld r22,savedar(r13) ; We might need this in a bit
2024
2025 lis r8,AsyMCKSrc ; Get the Async MCK Source register address
2026 mfsprg r19,2 ; Get the feature flags
2027 ori r8,r8,0x8000 ; Set to read data
2028 rlwinm. r0,r19,0,pfSCOMFixUpb,pfSCOMFixUpb ; Do we need to fix the SCOM data?
2029
2030 sync
2031
2032 mtspr scomc,r8 ; Request the MCK source
2033 mfspr r24,scomd ; Get the source
2034 mfspr r8,scomc ; Get back the status (we just ignore it)
2035 sync
2036 isync
2037
2038 lis r8,AsyMCKRSrc ; Get the Async MCK Source AND mask address
2039 			li		r9,0					; Get an AND mask of 0
2040
2041 sync
2042
2043 mtspr scomd,r9 ; Set the AND mask to 0
2044 mtspr scomc,r8 ; Write the AND mask and clear conditions
2045 mfspr r8,scomc ; Get back the status (we just ignore it)
2046 sync
2047 isync
2048
2049 lis r8,cFIR ; Get the Core FIR register address
2050 ori r8,r8,0x8000 ; Set to read data
2051
2052 sync
2053
2054 mtspr scomc,r8 ; Request the Core FIR
2055 mfspr r25,scomd ; Get the source
2056 mfspr r8,scomc ; Get back the status (we just ignore it)
2057 sync
2058 isync
2059
2060 lis r8,cFIRrst ; Get the Core FIR AND mask address
2061
2062 sync
2063
2064 mtspr scomd,r9 ; Set the AND mask to 0
2065 mtspr scomc,r8 ; Write the AND mask and clear conditions
2066 mfspr r8,scomc ; Get back the status (we just ignore it)
2067 sync
2068 isync
2069
2070 lis r8,l2FIR ; Get the L2 FIR register address
2071 ori r8,r8,0x8000 ; Set to read data
2072
2073 sync
2074
2075 mtspr scomc,r8 ; Request the L2 FIR
2076 mfspr r26,scomd ; Get the source
2077 mfspr r8,scomc ; Get back the status (we just ignore it)
2078 sync
2079 isync
2080
2081 lis r8,l2FIRrst ; Get the L2 FIR AND mask address
2082
2083 sync
2084
2085 mtspr scomd,r9 ; Set the AND mask to 0
2086 mtspr scomc,r8 ; Write the AND mask and clear conditions
2087 mfspr r8,scomc ; Get back the status (we just ignore it)
2088 sync
2089 isync
2090
2091 lis r8,busFIR ; Get the Bus FIR register address
2092 ori r8,r8,0x8000 ; Set to read data
2093
2094 sync
2095
2096 mtspr scomc,r8 ; Request the Bus FIR
2097 mfspr r27,scomd ; Get the source
2098 mfspr r8,scomc ; Get back the status (we just ignore it)
2099 sync
2100 isync
2101
2102 lis r8,busFIRrst ; Get the Bus FIR AND mask address
2103
2104 sync
2105
2106 mtspr scomd,r9 ; Set the AND mask to 0
2107 mtspr scomc,r8 ; Write the AND mask and clear conditions
2108 mfspr r8,scomc ; Get back the status (we just ignore it)
2109 sync
2110 isync
2111
2112 ; Note: bug in early chips where scom reads are shifted right by 1. We fix that here.
2113 ; Also note that we will lose bit 63
2114
2115 beq++ mckNoFix ; No fix up is needed
2116 sldi r24,r24,1 ; Shift left 1
2117 sldi r25,r25,1 ; Shift left 1
2118 sldi r26,r26,1 ; Shift left 1
2119 sldi r27,r27,1 ; Shift left 1
2120
2121 mckNoFix: std r24,savexdat0(r13) ; Save the MCK source in case we pass the error
2122 std r25,savexdat1(r13) ; Save the Core FIR in case we pass the error
2123 std r26,savexdat2(r13) ; Save the L2 FIR in case we pass the error
2124 std r27,savexdat3(r13) ; Save the BUS FIR in case we pass the error
2125
2126 rlwinm. r0,r20,0,mckIFUE-32,mckIFUE-32 ; Is this some kind of uncorrectable?
2127 bne mckUE ; Yeah...
2128
2129 rlwinm. r0,r20,0,mckLDST-32,mckLDST-32 ; Some kind of load/store error?
2130 bne mckHandleLDST ; Yes...
2131
2132 rldicl. r0,r20,46,62 ; Get the error cause code
2133 beq mckNotSure ; We need some more checks for this one...
2134
2135 cmplwi r0,2 ; Check for TLB parity error
2136 blt mckSLBparity ; This is an SLB parity error...
2137 bgt mckhIFUE ; This is an IFetch tablewalk reload UE...
2138
2139 ; IFetch TLB parity error
2140
2141 isync
2142 tlbiel r23 ; Locally invalidate TLB entry for iaddr
2143 sync ; Wait for it
2144 b ceMck ; All recovered...
2145
2146 ; SLB parity error. This could be software caused. We get one if there is
2147 ; more than 1 valid SLBE with a matching ESID. That one we do not want to
2148 ; try to recover from. Search for it and if we get it, panic.
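;			The search below walks all 64 SLB entries with slbmfee, remembering the ESID
;			of each valid entry in what appears to be a scratch area (emvr0) in the
;			per_proc block; finding two valid entries with the same ESID marks the error
;			as software caused, otherwise the SLB is rebuilt and the check is treated as
;			recovered.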
2149
2150 mckSLBparity:
2151 crclr cr0_eq ; Make sure we are not equal so we take correct exit
2152
2153 la r3,emvr0(r2) ; Use this to keep track of valid ESIDs we find
2154 li r5,0 ; Start with index 0
2155
2156 mckSLBck: la r4,emvr0(r2) ; Use this to keep track of valid ESIDs we find
2157 slbmfee r6,r5 ; Get the next SLBE
2158 andis. r0,r6,0x0800 ; See if valid bit is on
2159 beq mckSLBnx ; Skip invalid and go to next
2160
2161 mckSLBck2: cmpld r4,r3 ; Have we reached the end of the table?
2162 beq mckSLBne ; Yes, go enter this one...
2163 ld r7,0(r4) ; Pick up the saved ESID
2164 cmpld r6,r7 ; Is this a match?
2165 beq mckSLBrec ; Whoops, I did bad, recover and pass up...
2166 addi r4,r4,8 ; Next table entry
2167 b mckSLBck2 ; Check the next...
2168
2169 mckSLBnx: addi r5,r5,1 ; Point to next SLBE
2170 cmplwi r5,64 ; Have we checked all of them?
2171 bne++ mckSLBck ; Not yet, check again...
2172 b mckSLBrec ; We looked at them all, go recover...
2173
2174 mckSLBne: std r6,0(r3) ; Save this ESID
2175 addi r3,r3,8 ; Point to the new slot
2176 b mckSLBnx ; Go do the next SLBE...
2177
2178 ; Recover an SLB error
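;			In outline: slbia throws away every entry except slot 0, which is then
;			invalidated explicitly with slbie; all 64 slots are written to zeros, and
;			ppInvSeg is set non-zero so the segment code reloads the SLB on the way out.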
2179
2180 mckSLBrec: li r0,0 ; Set an SLB slot index of 0
2181 slbia ; Trash all SLB entries (except for entry 0 that is)
2182 slbmfee r7,r0 ; Get the entry that is in SLB index 0
2183 rldicr r7,r7,0,35 ; Clear the valid bit and the rest
2184 slbie r7 ; Invalidate it
2185
2186 li r3,0 ; Set the first SLBE
2187
2188 mckSLBclr: slbmte r0,r3 ; Clear the whole entry to 0s
2189 addi r3,r3,1 ; Bump index
2190 cmplwi cr1,r3,64 ; Have we done them all?
2191 			bne++	cr1,mckSLBclr			; Not yet, clear the next one...
2192
2193 sth r3,ppInvSeg(r2) ; Store non-zero to trigger SLB reload
2194 bne++ ceMck ; This was not a programming error, all recovered...
2195 b ueMck ; Pass the software error up...
2196
2197 ;
2198 ; Handle a load/store unit error. We need to decode the DSISR
2199 ;
2200
2201 mckHandleLDST:
2202 rlwinm. r0,r21,0,mckL1DCPE,mckL1DCPE ; An L1 data cache parity error?
2203 bne++ mckL1D ; Yeah, we dealt with this back in the vector...
2204
2205 rlwinm. r0,r21,0,mckL1DTPE,mckL1DTPE ; An L1 tag error?
2206 bne++ mckL1T ; Yeah, we dealt with this back in the vector...
2207
2208 			rlwinm.	r0,r21,0,mckUEdfr,mckUEdfr	; Is this a "deferred" UE?
2209 bne mckDUE ; Yeah, go see if expected...
2210
2211 			rlwinm.	r0,r21,0,mckUETwDfr,mckUETwDfr	; Is this a "deferred" tablewalk UE?
2212 bne mckDTW ; Yeah, no recovery...
2213
2214 rlwinm. r0,r21,0,mckSLBPE,mckSLBPE ; SLB parity error?
2215 bne mckSLBparity ; Yeah, go attempt recovery....
2216
2217 ; This is a recoverable D-ERAT or TLB error
2218
2219 la r9,hwMckERCPE(r2) ; Get DERAT parity error count
2220
2221 mckInvDAR: isync
2222 tlbiel r22 ; Locally invalidate the TLB entry
2223 sync
2224
2225 lwz r21,0(r9) ; Get count
2226 addi r21,r21,1 ; Count this one
2227 stw r21,0(r9) ; Stick it back
2228
2229 b ceMck ; All recovered...
2230
2231 ;
2232 ; When we come here, we are not quite sure what the error is. We need to
2233 ; dig a bit further.
2234 ;
2235 ; R24 is interrupt source
2236 ; R25 is Core FIR
2237 ;
2238 ; Note that both have been cleared already.
2239 ;
2240
2241 mckNotSure:
2242 rldicl. r0,r24,AsyMCKfir+1,63 ; Something in the FIR?
2243 bne-- mckFIR ; Yup, go check some more...
2244
2245 rldicl. r0,r24,AsyMCKhri+1,63 ; Hang recovery?
2246 bne-- mckHangRcvr ; Yup...
2247
2248 rldicl. r0,r24,AsyMCKext+1,63 ; External signal?
2249 bne-- mckExtMck ; Yup...
2250
2251 ;
2252 ; We really do not know what this one is or what to do with it...
2253 ;
2254
2255 mckUnk: lwz r21,hwMckUnk(r2) ; Get unknown error count
2256 addi r21,r21,1 ; Count it
2257 stw r21,hwMckUnk(r2) ; Stuff it
2258 b ueMck ; Go south, young man...
2259
2260 ;
2261 ; Hang recovery. This is just a notification so we only count.
2262 ;
2263
2264 mckHangRcrvr:
2265 lwz r21,hwMckHang(r2) ; Get hang recovery count
2266 addi r21,r21,1 ; Count this one
2267 stw r21,hwMckHang(r2) ; Stick it back
2268 b ceMck ; All recovered...
2269
2270 ;
2271 ;			Externally signaled MCK. No recovery for the moment, but this may be
2272 ; where we handle ml_probe_read problems eventually.
2273 ;
2274 mckExtMck:
2275 lwz r21,hwMckHang(r2) ; Get hang recovery count
2276 addi r21,r21,1 ; Count this one
2277 stw r21,hwMckHang(r2) ; Stick it back
2278 b ceMck ; All recovered...
2279
2280 ;
2281 ; Machine check cause is in a FIR. Suss it out here.
2282 ; Core FIR is in R25 and has been cleared in HW.
2283 ;
2284
2285 mckFIR: rldicl. r0,r25,cFIRICachePE+1,63 ; I-Cache parity error?
2286 la r19,hwMckICachePE(r2) ; Point to counter
2287 bne mckInvICache ; Go invalidate I-Cache...
2288
2289 rldicl. r0,r25,cFIRITagPE0+1,63 ; I-Cache tag parity error?
2290 la r19,hwMckITagPE(r2) ; Point to counter
2291 bne mckInvICache ; Go invalidate I-Cache...
2292
2293 rldicl. r0,r25,cFIRITagPE1+1,63 ; I-Cache tag parity error?
2294 la r19,hwMckITagPE(r2) ; Point to counter
2295 bne mckInvICache ; Go invalidate I-Cache...
2296
2297 rldicl. r0,r25,cFIRIEratPE+1,63 ; IERAT parity error?
2298 la r19,hwMckIEratPE(r2) ; Point to counter
2299 bne mckInvERAT ; Go invalidate ERATs...
2300
2301 rldicl. r0,r25,cFIRIFUL2UE+1,63 ; IFetch got L2 UE?
2302 bne mckhIFUE ; Go count and pass up...
2303
2304 rldicl. r0,r25,cFIRDCachePE+1,63 ; D-Cache PE?
2305 bne mckL1D ; Handled, just go count...
2306
2307 rldicl. r0,r25,cFIRDTagPE+1,63 ; D-Cache tag PE?
2308 bne mckL1T ; Handled, just go count...
2309
2310 rldicl. r0,r25,cFIRDEratPE+1,63 ; DERAT PE?
2311 la r19,hwMckDEratPE(r2) ; Point to counter
2312 bne mckInvERAT ; Go invalidate ERATs...
2313
2314 rldicl. r0,r25,cFIRTLBPE+1,63 ; TLB PE?
2315 la r9,hwMckTLBPE(r2) ; Get TLB parity error count
2316 bne mckInvDAR ; Go recover...
2317
2318 rldicl. r0,r25,cFIRSLBPE+1,63 ; SLB PE?
2319 bne mckSLBparity ; Cope with it...
2320
2321 b mckUnk ; Have not a clue...
2322
2323 ;
2324 ; General recovery for I-Cache errors. Just flush it completely.
2325 ;
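;
;			The flush below leans on the HID1 "forced ICBI match" bit: while it is set,
;			every icbi invalidates its congruence class regardless of the address tag, so
;			stepping a dummy address through 128-byte lines until it reaches 0x10000
;			(presumably the 64KB I-cache size) wipes the whole cache.  The bit position
;			and geometry are inferred from the constants used here.
;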
2326
2327 .align 7 ; Force into cache line
2328
2329 mckInvICache:
2330 lis r0,0x0080 ; Get a 0x0080 (bit 9 >> 32)
2331 mfspr r21,hid1 ; Get the current HID1
2332 sldi r0,r0,32 ; Get the "forced ICBI match" bit
2333 or r0,r0,r21 ; Set forced match
2334
2335 isync
2336 mtspr hid1,r0 ; Stick it
2337 mtspr hid1,r0 ; Stick it again
2338 isync
2339
2340 li r6,0 ; Start at 0
2341
2342 mckIcbi: icbi 0,r6 ; Kill I$
2343 addi r6,r6,128 ; Next line
2344 andis. r5,r6,1 ; Have we done them all?
2345 beq++ mckIcbi ; Not yet...
2346
2347 isync
2348 mtspr hid1,r21 ; Restore original HID1
2349 mtspr hid1,r21 ; Stick it again
2350 isync
2351
2352 lwz r5,0(r19) ; Get the counter
2353 addi r5,r5,1 ; Count it
2354 stw r5,0(r19) ; Stuff it back
2355 b ceMck ; All recovered...
2356
2357
2358 ; General recovery for ERAT problems - handled in exception vector already
2359
2360 mckInvERAT: lwz r21,0(r19) ; Get the exception count spot
2361 addi r21,r21,1 ; Count this one
2362 stw r21,0(r19) ; Save count
2363 b ceMck ; All recovered...
2364
2365 ; General hang recovery - this is a notification only, just count.
2366
2367 mckHangRcvr:
2368 lwz r21,hwMckHang(r2) ; Get hang recovery count
2369 addi r21,r21,1 ; Count this one
2370 stw r21,hwMckHang(r2) ; Stick it back
2371 b ceMck ; All recovered...
2372
2373
2374 ;
2375 ; These are the uncorrectable errors, just count them then pass it along.
2376 ;
2377
2378 mckUE: lwz r21,hwMckUE(r2) ; Get general uncorrectable error count
2379 addi r21,r21,1 ; Count it
2380 stw r21,hwMckUE(r2) ; Stuff it
2381 b ueMck ; Go south, young man...
2382
2383 mckhIFUE: lwz r21,hwMckIUEr(r2) ; Get I-Fetch TLB reload uncorrectable error count
2384 addi r21,r21,1 ; Count it
2385 stw r21,hwMckIUEr(r2) ; Stuff it
2386 b ueMck ; Go south, young man...
2387
2388 mckDUE: lwz r21,hwMckDUE(r2) ; Get deferred uncorrectable error count
2389 addi r21,r21,1 ; Count it
2390 stw r21,hwMckDUE(r2) ; Stuff it
2391
2392 ;
2393 ; Right here is where we end up after a failure on a ml_probe_read_64.
2394 ; We will check if that is the case, and if so, fix everything up and
2395 ; return from it.
2396
2397 lis r8,hi16(EXT(ml_probe_read_64)) ; High of start
2398 lis r9,hi16(EXT(ml_probe_read_mck_64)) ; High of end
2399 ori r8,r8,lo16(EXT(ml_probe_read_64)) ; Low of start
2400 ori r9,r9,lo16(EXT(ml_probe_read_mck_64)) ; Low of end
2401 cmpld r23,r8 ; Too soon?
2402 cmpld cr1,r23,r9 ; Too late?
2403
2404 cror cr0_lt,cr0_lt,cr1_gt ; Too soon or too late?
2405 ld r3,saver12(r13) ; Get the original MSR
2406 ld r5,savelr(r13) ; Get the return address
2407 li r4,0 ; Get fail code
2408 blt-- ueMck ; This is a normal machine check, just pass up...
2409 			std		r5,savesrr0(r13)		; Set the return address
2410
2411 			std		r3,savesrr1(r13)		; Set the return MSR
2412 std r4,saver3(r13) ; Set failure return code
2413 b ceMck ; All recovered...
2414
2415 mckDTW: lwz r21,hwMckDTW(r2) ; Get deferred tablewalk uncorrectable error count
2416 addi r21,r21,1 ; Count it
2417 stw r21,hwMckDTW(r2) ; Stuff it
2418 b ueMck ; Go south, young man...
2419
2420 mckL1D: lwz r21,hwMckL1DPE(r2) ; Get data cache parity error count
2421 addi r21,r21,1 ; Count it
2422 stw r21,hwMckL1DPE(r2) ; Stuff it
2423 b ceMck ; All recovered...
2424
2425 mckL1T:		lwz		r21,hwMckL1TPE(r2)		; Get L1 tag parity error count
2426 addi r21,r21,1 ; Count it
2427 stw r21,hwMckL1TPE(r2) ; Stuff it
2428
2429 ceMck: li r0,1 ; Set the recovered flag before passing up
2430 stw r0,savemisc3(r13) ; Set it
2431 b PassUpTrap ; Go up and log error...
2432
2433 ueMck: li r0,0 ; Set the unrecovered flag before passing up
2434 stw r0,savemisc3(r13) ; Set it
2435 b PassUpTrap ; Go up and log error and probably panic
2436
2437
2438 /*
2439 * Here's where we come back from some instruction emulator. If we come back with
2440 * T_IN_VAIN, the emulation is done and we should just reload state and directly
2441 * go back to the interrupted code. Otherwise, we'll check to see if
2442 * we need to redrive with a different interrupt, i.e., DSI.
2443  * Note that we are actually not redriving the rupt, rather changing it
2444 * into a different one. Thus we clear the redrive bit.
2445 */
2446
2447 .align 5
2448 .globl EXT(EmulExit)
2449
2450 LEXT(EmulExit)
2451
2452 cmplwi cr1,r11,T_IN_VAIN ; Was it emulated?
2453 lis r1,hi16(SAVredrive) ; Get redrive request
2454 beq++ cr1,EatRupt ; Yeah, just blast back to the user...
2455 lwz r4,SAVflags(r13) ; Pick up the flags
2456
2457 and. r0,r4,r1 ; Check if redrive requested
2458
2459 beq++ PassUpTrap ; No redrive, just keep on going...
2460
2461 b Redrive ; Redrive the exception...
2462
2463 ;
2464 ; Jump into main handler code switching on VM at the same time.
2465 ;
2466 ; We assume kernel data is mapped contiguously in physical
2467 ; memory, otherwise we would need to switch on (at least) virtual data.
2468 ; SRs are already set up.
2469 ;
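;
;			Mechanically, each of these paths loads the handler address into SRR0 and the
;			normal kernel MSR (plus any preserved trace bits) into SRR1, then issues
;			rfi/rfid so translation comes on and control transfers in a single step.
;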
2470
2471 .align 5
2472
2473 PassUpTrap: lis r20,hi16(EXT(thandler)) ; Get thandler address
2474 ori r20,r20,lo16(EXT(thandler)) ; Get thandler address
2475 b PassUp ; Go pass it up...
2476
2477 PassUpRupt: lis r20,hi16(EXT(ihandler)) ; Get ihandler address
2478 ori r20,r20,lo16(EXT(ihandler)) ; Get ihandler address
2479 b PassUp ; Go pass it up...
2480
2481 .align 5
2482
2483 PassUpFPU: lis r20,hi16(EXT(fpu_switch)) ; Get FPU switcher address
2484 ori r20,r20,lo16(EXT(fpu_switch)) ; Get FPU switcher address
2485 b PassUp ; Go pass it up...
2486
2487 PassUpVMX: lis r20,hi16(EXT(vec_switch)) ; Get VMX switcher address
2488 ori r20,r20,lo16(EXT(vec_switch)) ; Get VMX switcher address
2489 bt++ featAltivec,PassUp ; We have VMX on this CPU...
2490 li r11,T_PROGRAM ; Say that it is a program exception
2491 li r20,8 ; Set invalid instruction
2492 			stw		r11,saveexception(r13)	; Set the new exception code
2493 sth r20,savesrr1+4(r13) ; Set the invalid instruction SRR code
2494
2495 b PassUpTrap ; Go pass it up...
2496
2497 .align 5
2498
2499 PassUpAbend:
2500 lis r20,hi16(EXT(chandler)) ; Get choke handler address
2501 ori r20,r20,lo16(EXT(chandler)) ; Get choke handler address
2502 b PassUp ; Go pass it up...
2503
2504 .align 5
2505
2506 PassUp:
2507 #if INSTRUMENT
2508 mfspr r29,pmc1 ; INSTRUMENT - saveinstr[11] - Take stamp at passup or eatrupt
2509 stw r29,0x6100+(11*16)+0x0(0) ; INSTRUMENT - Save it
2510 mfspr r29,pmc2 ; INSTRUMENT - Get stamp
2511 stw r29,0x6100+(11*16)+0x4(0) ; INSTRUMENT - Save it
2512 mfspr r29,pmc3 ; INSTRUMENT - Get stamp
2513 stw r29,0x6100+(11*16)+0x8(0) ; INSTRUMENT - Save it
2514 mfspr r29,pmc4 ; INSTRUMENT - Get stamp
2515 stw r29,0x6100+(11*16)+0xC(0) ; INSTRUMENT - Save it
2516 #endif
2517
2518 lwz r10,SAVflags(r13) ; Pick up the flags
2519
2520 li r0,0xFFF ; Get a page mask
2521 li r2,MASK(MSR_BE)|MASK(MSR_SE) ; Get the mask to save trace bits
2522 andc r5,r13,r0 ; Back off to the start of savearea block
2523 mfmsr r3 ; Get our MSR
2524 rlwinm r10,r10,0,SAVredriveb+1,SAVredriveb-1 ; Clear the redrive before we pass it up
2525 li r21,MSR_SUPERVISOR_INT_OFF ; Get our normal MSR value
2526 and r3,r3,r2 ; Clear all but trace
2527 lwz r5,SACvrswap+4(r5) ; Get real to virtual conversion
2528 or r21,r21,r3 ; Keep the trace bits if they are on
2529 stw r10,SAVflags(r13) ; Set the flags with the cleared redrive flag
2530 			mr		r3,r11					; Pass the exception code in the parameter reg
2531 xor r4,r13,r5 ; Pass up the virtual address of context savearea
2532 mfsprg r29,0 ; Get the per_proc block back
2533 rlwinm r4,r4,0,0,31 ; Clean top half of virtual savearea if 64-bit
2534
2535 mr r3,r21 ; Pass in the MSR we will go to
2536 bl EXT(switchSegs) ; Go handle the segment registers/STB
2537
2538 #if INSTRUMENT
2539 			mfspr	r30,pmc1				; INSTRUMENT - saveinstr[7] - Take stamp after switchsegs
2540 stw r30,0x6100+(7*16)+0x0(0) ; INSTRUMENT - Save it
2541 mfspr r30,pmc2 ; INSTRUMENT - Get stamp
2542 stw r30,0x6100+(7*16)+0x4(0) ; INSTRUMENT - Save it
2543 mfspr r30,pmc3 ; INSTRUMENT - Get stamp
2544 stw r30,0x6100+(7*16)+0x8(0) ; INSTRUMENT - Save it
2545 mfspr r30,pmc4 ; INSTRUMENT - Get stamp
2546 stw r30,0x6100+(7*16)+0xC(0) ; INSTRUMENT - Save it
2547 #endif
2548 lwz r3,saveexception(r13) ; Recall the exception code
2549
2550 mtsrr0 r20 ; Set up the handler address
2551 mtsrr1 r21 ; Set up our normal MSR value
2552
2553 bt++ pf64Bitb,puLaunch ; Handle 64-bit machine...
2554
2555 rfi ; Launch the exception handler
2556
2557 puLaunch: rfid ; Launch the exception handler
2558
2559 /*
2560 * This routine is the main place where we return from an interruption.
2561 *
2562 * This is also where we release the quickfret list. These are saveareas
2563 * that were released as part of the exception exit path in hw_exceptions.
2564 * In order to save an atomic operation (which actually will not work
2565 * properly on a 64-bit machine) we use holdQFret to indicate that the list
2566 * is in flux and should not be looked at here. This comes into play only
2567 * when we take a PTE miss when we are queuing a savearea onto qfret.
2568 * Quite rare but could happen. If the flag is set, this code does not
2569 * release the list and waits until next time.
2570 *
2571 * All we need to remember here is that R13 must point to the savearea
2572 * that has the context we need to load up. Translation and interruptions
2573 * must be disabled.
2574 *
2575 * This code always loads the context in the savearea pointed to
2576 * by R13. In the process, it throws away the savearea. If there
2577 * is any tomfoolery with savearea stacks, it must be taken care of
2578 * before we get here.
2579 *
2580 */
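;
;			The quickfret list itself is a singly linked list anchored at the per_proc
;			quickfret field and chained through each savearea's SAVprev field; the loops
;			below simply pop one entry at a time and hand it to save_ret_phys.
;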
2581
2582 .align 5
2583
2584 EatRupt: mfsprg r29,0 ; Get the per_proc block back
2585 mr r31,r13 ; Move the savearea pointer to the far end of the register set
2586 mfsprg r27,2 ; Get the processor features
2587
2588 lwz r3,holdQFret(r29) ; Get the release hold off flag
2589
2590 bt++ pf64Bitb,eat64a ; Skip down to the 64-bit version of this
2591
2592 ;
2593 ; This starts the 32-bit version
2594 ;
2595
2596 mr. r3,r3 ; Should we hold off the quick release?
2597 lwz r30,quickfret+4(r29) ; Pick up the quick fret list, if any
2598 la r21,saver0(r31) ; Point to the first thing we restore
2599 bne- ernoqfret ; Hold off set, do not release just now...
2600
2601 erchkfret: mr. r3,r30 ; Any savearea to quickly release?
2602 beq+ ernoqfret ; No quickfrets...
2603 lwz r30,SAVprev+4(r30) ; Chain back now
2604
2605 bl EXT(save_ret_phys) ; Put it on the free list
2606 stw r30,quickfret+4(r29) ; Dequeue previous guy (really, it is ok to wait until after the release)
2607 b erchkfret ; Try the next one...
2608
2609 .align 5
2610
2611 ernoqfret:
2612 #if INSTRUMENT
2613 mfspr r30,pmc1 ; INSTRUMENT - saveinstr[5] - Take stamp at saveareas released
2614 stw r30,0x6100+(5*16)+0x0(0) ; INSTRUMENT - Save it
2615 mfspr r30,pmc2 ; INSTRUMENT - Get stamp
2616 stw r30,0x6100+(5*16)+0x4(0) ; INSTRUMENT - Save it
2617 mfspr r30,pmc3 ; INSTRUMENT - Get stamp
2618 stw r30,0x6100+(5*16)+0x8(0) ; INSTRUMENT - Save it
2619 mfspr r30,pmc4 ; INSTRUMENT - Get stamp
2620 stw r30,0x6100+(5*16)+0xC(0) ; INSTRUMENT - Save it
2621 #endif
2622
2623 dcbt 0,r21 ; Touch in the first thing we need
2624
2625 ;
2626 ; Here we release the savearea.
2627 ;
2628 ; Important!!!! The savearea is released before we are done with it. When the
2629 ; local free savearea list (anchored at lclfree) gets too long, save_ret_phys
2630 ; will trim the list, making the extra saveareas allocatable by another processor
2631 ; The code in there must ALWAYS leave our savearea on the local list, otherwise
2632 ; we could be very, very unhappy. The code there always queues the "just released"
2633 ; savearea to the head of the local list. Then, if it needs to trim, it will
2634 ; start with the SECOND savearea, leaving ours intact.
2635 ;
2636 ;
2637
2638 mr r3,r31 ; Get the exiting savearea in parm register
2639 bl EXT(save_ret_phys) ; Put it on the free list
2640 #if INSTRUMENT
2641 			mfspr	r3,pmc1					; INSTRUMENT - saveinstr[6] - Take stamp after savearea released
2642 stw r3,0x6100+(6*16)+0x0(0) ; INSTRUMENT - Save it
2643 mfspr r3,pmc2 ; INSTRUMENT - Get stamp
2644 stw r3,0x6100+(6*16)+0x4(0) ; INSTRUMENT - Save it
2645 mfspr r3,pmc3 ; INSTRUMENT - Get stamp
2646 stw r3,0x6100+(6*16)+0x8(0) ; INSTRUMENT - Save it
2647 mfspr r3,pmc4 ; INSTRUMENT - Get stamp
2648 stw r3,0x6100+(6*16)+0xC(0) ; INSTRUMENT - Save it
2649 #endif
2650
2651 lwz r3,savesrr1+4(r31) ; Pass in the MSR we are going to
2652 bl EXT(switchSegs) ; Go handle the segment registers/STB
2653 #if INSTRUMENT
2654 			mfspr	r30,pmc1				; INSTRUMENT - saveinstr[10] - Take stamp after switchsegs
2655 stw r30,0x6100+(10*16)+0x0(0) ; INSTRUMENT - Save it
2656 mfspr r30,pmc2 ; INSTRUMENT - Get stamp
2657 stw r30,0x6100+(10*16)+0x4(0) ; INSTRUMENT - Save it
2658 mfspr r30,pmc3 ; INSTRUMENT - Get stamp
2659 stw r30,0x6100+(10*16)+0x8(0) ; INSTRUMENT - Save it
2660 mfspr r30,pmc4 ; INSTRUMENT - Get stamp
2661 stw r30,0x6100+(10*16)+0xC(0) ; INSTRUMENT - Save it
2662 #endif
2663 li r3,savesrr1+4 ; Get offset to the srr1 value
2664
2665 lhz r9,PP_CPU_FLAGS(r29) ; Get the processor flags
2666 lwarx r26,r3,r31 ; Get destination MSR and take reservation along the way (just so we can blow it away)
2667
2668 rlwinm r25,r26,27,22,22 ; Move PR bit to BE
2669
2670 cmplw cr3,r14,r14 ; Set that we do not need to stop streams
2671
2672 rlwinm r9,r9,(((31-MSR_BE_BIT)+(traceBEb+16+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Set BE bit if special trace is on
2673 li r21,emfp0 ; Point to the fp savearea
2674 and r9,r9,r25 ; Clear BE if supervisor state
2675 or r26,r26,r9 ; Flip on the BE bit for special trace if needed
2676 stwcx. r26,r3,r31 ; Blow away any reservations we hold (and set BE)
2677
2678 lwz r25,savesrr0+4(r31) ; Get the SRR0 to use
2679
2680 la r28,saver4(r31) ; Point to the 32-byte line with r4-r7
2681 dcbz r21,r29 ; Clear a work area
2682 lwz r0,saver0+4(r31) ; Restore R0
2683 dcbt 0,r28 ; Touch in r4-r7
2684 lwz r1,saver1+4(r31) ; Restore R1
2685 lwz r2,saver2+4(r31) ; Restore R2
2686 la r28,saver8(r31) ; Point to the 32-byte line with r8-r11
2687 lwz r3,saver3+4(r31) ; Restore R3
2688 andis. r6,r27,hi16(pfAltivec) ; Do we have altivec on the machine?
2689 dcbt 0,r28 ; touch in r8-r11
2690 lwz r4,saver4+4(r31) ; Restore R4
2691 la r28,saver12(r31) ; Point to the 32-byte line with r12-r15
2692 mtsrr0 r25 ; Restore the SRR0 now
2693 lwz r5,saver5+4(r31) ; Restore R5
2694 mtsrr1 r26 ; Restore the SRR1 now
2695 lwz r6,saver6+4(r31) ; Restore R6
2696
2697 dcbt 0,r28 ; touch in r12-r15
2698 la r28,saver16(r31)
2699
2700 lwz r7,saver7+4(r31) ; Restore R7
2701 lwz r8,saver8+4(r31) ; Restore R8
2702 lwz r9,saver9+4(r31) ; Restore R9
2703
2704 dcbt 0,r28 ; touch in r16-r19
2705 la r28,saver20(r31)
2706
2707 lwz r10,saver10+4(r31) ; Restore R10
2708 lwz r11,saver11+4(r31) ; Restore R11
2709
2710 dcbt 0,r28 ; touch in r20-r23
2711 la r28,savevscr(r31) ; Point to the status area
2712
2713 lwz r12,saver12+4(r31) ; Restore R12
2714 lwz r13,saver13+4(r31) ; Restore R13
2715
2716 la r14,savectr+4(r31)
2717 dcbt 0,r28 ; Touch in VSCR and FPSCR
2718 dcbt 0,r14 ; touch in CTR, DAR, DSISR, VRSAVE, and Exception code
2719
2720 lwz r26,next_savearea+4(r29) ; Get the exception save area
2721 la r28,saver24(r31)
2722
2723 lwz r14,saver14+4(r31) ; Restore R14
2724 lwz r15,saver15+4(r31) ; Restore R15
2725
2726
2727 stfd f0,emfp0(r29) ; Save FP0
2728 lwz r27,savevrsave(r31) ; Get the vrsave
2729 dcbt 0,r28 ; touch in r24-r27
2730 la r28,savevscr(r31) ; Point to the status area
2731 lfd f0,savefpscrpad(r31) ; Get the fpscr
2732 la r22,saver28(r31)
2733 mtfsf 0xFF,f0 ; Restore fpscr
2734 lfd f0,emfp0(r29) ; Restore the used register
2735
2736 beq noavec3 ; No Altivec on this CPU...
2737
2738 stvxl v0,r21,r29 ; Save a vector register
2739 lvxl v0,0,r28 ; Get the vector status
2740 mtspr vrsave,r27 ; Set the vrsave
2741 mtvscr v0 ; Set the vector status
2742 lvxl v0,r21,r29 ; Restore work vector register
2743
2744 noavec3: dcbt 0,r22 ; touch in r28-r31
2745
2746 lwz r23,spcFlags(r29) ; Get the special flags from per_proc
2747 la r17,savesrr0(r31)
2748 la r26,saver0(r26) ; Point to the first part of the next savearea
2749 dcbt 0,r17 ; touch in SRR0, SRR1, CR, XER, LR
2750 lhz r28,pfrptdProc(r29) ; Get the reported processor type
2751
2752 lwz r16,saver16+4(r31) ; Restore R16
2753 lwz r17,saver17+4(r31) ; Restore R17
2754 lwz r18,saver18+4(r31) ; Restore R18
2755 lwz r19,saver19+4(r31) ; Restore R19
2756 lwz r20,saver20+4(r31) ; Restore R20
2757 lwz r21,saver21+4(r31) ; Restore R21
2758 lwz r22,saver22+4(r31) ; Restore R22
2759
2760 cmpwi cr1,r28,CPU_SUBTYPE_POWERPC_750 ; G3?
2761
2762 			dcbz	0,r26					; Clear and allocate the next savearea we will use, on the off chance it is still in the cache when we next interrupt
2763
2764 andis. r23,r23,hi16(perfMonitor) ; Is the performance monitor enabled?
2765 lwz r23,saver23+4(r31) ; Restore R23
2766 cmpwi cr2,r28,CPU_SUBTYPE_POWERPC_7400 ; Yer standard G4?
2767 lwz r24,saver24+4(r31) ; Restore R24
2768 lwz r25,saver25+4(r31) ; Restore R25
2769 lwz r26,saver26+4(r31) ; Restore R26
2770 lwz r27,saver27+4(r31) ; Restore R27
2771
2772 beq+ noPerfMonRestore32 ; No perf monitor...
2773
2774 beq- cr1,perfMonRestore32_750 ; This is a G3...
2775 beq- cr2,perfMonRestore32_7400 ; Standard G4...
2776
2777 lwz r28,savepmc+16(r31)
2778 lwz r29,savepmc+20(r31)
2779 mtspr pmc5,r28 ; Restore PMC5
2780 mtspr pmc6,r29 ; Restore PMC6
2781
2782 perfMonRestore32_7400:
2783 lwz r28,savemmcr2+4(r31)
2784 mtspr mmcr2,r28 ; Restore MMCR2
2785
2786 perfMonRestore32_750:
2787 lwz r28,savepmc+0(r31)
2788 lwz r29,savepmc+4(r31)
2789 mtspr pmc1,r28 ; Restore PMC1
2790 mtspr pmc2,r29 ; Restore PMC2
2791 lwz r28,savepmc+8(r31)
2792 lwz r29,savepmc+12(r31)
2793 mtspr pmc3,r28 ; Restore PMC3
2794 mtspr pmc4,r29 ; Restore PMC4
2795 lwz r28,savemmcr1+4(r31)
2796 lwz r29,savemmcr0+4(r31)
2797 mtspr mmcr1,r28 ; Restore MMCR1
2798 mtspr mmcr0,r29 ; Restore MMCR0
2799
2800 noPerfMonRestore32:
2801 lwz r28,savecr(r31) ; Get CR to restore
2802 lwz r29,savexer+4(r31) ; Get XER to restore
2803 mtcr r28 ; Restore the CR
2804 lwz r28,savelr+4(r31) ; Get LR to restore
2805 mtxer r29 ; Restore the XER
2806 lwz r29,savectr+4(r31) ; Get the CTR to restore
2807 mtlr r28 ; Restore the LR
2808 lwz r28,saver30+4(r31) ; Get R30
2809 mtctr r29 ; Restore the CTR
2810 lwz r29,saver31+4(r31) ; Get R31
2811 mtsprg 2,r28 ; Save R30 for later
2812 lwz r28,saver28+4(r31) ; Restore R28
2813 mtsprg 3,r29 ; Save R31 for later
2814 lwz r29,saver29+4(r31) ; Restore R29
2815
2816 mfsprg r31,0 ; Get per_proc
2817 mfsprg r30,2 ; Restore R30
2818 lwz r31,pfAvailable(r31) ; Get the feature flags
2819 mtsprg 2,r31 ; Set the feature flags
2820 mfsprg r31,3 ; Restore R31
2821
2822 rfi ; Click heels three times and think very hard that there is no place like home...
2823
2824 .long 0 ; Leave this here
2825 .long 0
2826 .long 0
2827 .long 0
2828 .long 0
2829 .long 0
2830 .long 0
2831 .long 0
2832
2833
2834 ;
2835 ; This starts the 64-bit version
2836 ;
2837
2838 .align 7
2839
2840 eat64a: ld r30,quickfret(r29) ; Pick up the quick fret list, if any
2841
2842 mr. r3,r3 ; Should we hold off the quick release?
2843 la r21,saver0(r31) ; Point to the first thing we restore
2844 bne-- ernoqfre64 ; Hold off set, do not release just now...
2845
2846 erchkfre64: mr. r3,r30 ; Any savearea to quickly release?
2847 beq+ ernoqfre64 ; No quickfrets...
2848 ld r30,SAVprev(r30) ; Chain back now
2849
2850 bl EXT(save_ret_phys) ; Put it on the free list
2851
2852 std r30,quickfret(r29) ; Dequeue previous guy (really, it is ok to wait until after the release)
2853 b erchkfre64 ; Try the next one...
2854
2855 .align 7
2856
2857 ernoqfre64: dcbt 0,r21 ; Touch in the first thing we need
2858
2859 ;
2860 ; Here we release the savearea.
2861 ;
2862 ; Important!!!! The savearea is released before we are done with it. When the
2863 ; local free savearea list (anchored at lclfree) gets too long, save_ret_phys
2864 ; will trim the list, making the extra saveareas allocatable by another processor
2865 ; The code in there must ALWAYS leave our savearea on the local list, otherwise
2866 ; we could be very, very unhappy. The code there always queues the "just released"
2867 ; savearea to the head of the local list. Then, if it needs to trim, it will
2868 ; start with the SECOND savearea, leaving ours intact.
2869 ;
2870 ;
2871
2872 li r3,lgKillResv ; Get spot to kill reservation
2873 stdcx. r3,0,r3 ; Blow away any reservations we hold
2874
2875 mr r3,r31 ; Get the exiting savearea in parm register
2876 bl EXT(save_ret_phys) ; Put it on the free list
2877
2878 lwz r3,savesrr1+4(r31) ; Pass in the MSR we will be going to
2879 bl EXT(switchSegs) ; Go handle the segment registers/STB
2880
2881 lhz r9,PP_CPU_FLAGS(r29) ; Get the processor flags
2882 ld r26,savesrr1(r31) ; Get destination MSR
2883 cmplw cr3,r14,r14 ; Set that we do not need to stop streams
2884 rlwinm r25,r26,27,22,22 ; Move PR bit to BE
2885
2886 rlwinm r9,r9,(((31-MSR_BE_BIT)+(traceBEb+16+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Set BE bit if special trace is on
2887 li r21,emfp0 ; Point to a workarea
2888 and r9,r9,r25 ; Clear BE if supervisor state
2889 or r26,r26,r9 ; Flip on the BE bit for special trace if needed
2890
2891 ld r25,savesrr0(r31) ; Get the SRR0 to use
2892 la r28,saver16(r31) ; Point to the 128-byte line with r16-r31
2893 dcbz128 r21,r29 ; Clear a work area
2894 ld r0,saver0(r31) ; Restore R0
2895 dcbt 0,r28 ; Touch in r16-r31
2896 ld r1,saver1(r31) ; Restore R1
2897 ld r2,saver2(r31) ; Restore R2
2898 ld r3,saver3(r31) ; Restore R3
2899 mtcrf 0x80,r27 ; Get facility availability flags (do not touch CR1-7)
2900 ld r4,saver4(r31) ; Restore R4
2901 mtsrr0 r25 ; Restore the SRR0 now
2902 ld r5,saver5(r31) ; Restore R5
2903 mtsrr1 r26 ; Restore the SRR1 now
2904 ld r6,saver6(r31) ; Restore R6
2905
2906 ld r7,saver7(r31) ; Restore R7
2907 ld r8,saver8(r31) ; Restore R8
2908 ld r9,saver9(r31) ; Restore R9
2909
2910 la r28,savevscr(r31) ; Point to the status area
2911
2912 ld r10,saver10(r31) ; Restore R10
2913 ld r11,saver11(r31) ; Restore R11
2914 ld r12,saver12(r31) ; Restore R12
2915 ld r13,saver13(r31) ; Restore R13
2916
2917 ld r26,next_savearea(r29) ; Get the exception save area
2918
2919 ld r14,saver14(r31) ; Restore R14
2920 ld r15,saver15(r31) ; Restore R15
2921 lwz r27,savevrsave(r31) ; Get the vrsave
2922
2923 bf-- pfAltivecb,noavec2s ; Skip if no VMX...
2924
2925 stvxl v0,r21,r29 ; Save a vector register
2926 lvxl v0,0,r28 ; Get the vector status
2927 mtvscr v0 ; Set the vector status
2928
2929 lvxl v0,r21,r29 ; Restore work vector register
2930
2931 noavec2s: mtspr vrsave,r27 ; Set the vrsave
2932
2933 lwz r28,saveexception(r31) ; Get exception type
2934 stfd f0,emfp0(r29) ; Save FP0
2935 lfd f0,savefpscrpad(r31) ; Get the fpscr
2936 mtfsf 0xFF,f0 ; Restore fpscr
2937 lfd f0,emfp0(r29) ; Restore the used register
2938 ld r16,saver16(r31) ; Restore R16
2939 lwz r30,spcFlags(r29) ; Get the special flags from per_proc
2940 ld r17,saver17(r31) ; Restore R17
2941 ld r18,saver18(r31) ; Restore R18
2942 cmplwi cr1,r28,T_RESET ; Are we returning from a reset?
2943 ld r19,saver19(r31) ; Restore R19
2944 ld r20,saver20(r31) ; Restore R20
2945 li r27,0 ; Get a zero
2946 ld r21,saver21(r31) ; Restore R21
2947 la r26,saver0(r26) ; Point to the first part of the next savearea
2948 andis. r30,r30,hi16(perfMonitor) ; Is the performance monitor enabled?
2949 ld r22,saver22(r31) ; Restore R22
2950 ld r23,saver23(r31) ; Restore R23
2951 bne++ cr1,er64rrst ; We are not returning from a reset...
2952 stw r27,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_TYPE)(br0) ; Allow resets again
2953
2954 er64rrst: ld r24,saver24(r31) ; Restore R24
2955
2956 			dcbz128	0,r26					; Clear and allocate the next savearea we will use, on the off chance it is still in the cache when we next interrupt
2957
2958 ld r25,saver25(r31) ; Restore R25
2959 ld r26,saver26(r31) ; Restore R26
2960 ld r27,saver27(r31) ; Restore R27
2961
2962 beq++ noPerfMonRestore64 ; Nope...
2963
2964 lwz r28,savepmc+0(r31)
2965 lwz r29,savepmc+4(r31)
2966 mtspr pmc1_gp,r28 ; Restore PMC1
2967 mtspr pmc2_gp,r29 ; Restore PMC2
2968 lwz r28,savepmc+8(r31)
2969 lwz r29,savepmc+12(r31)
2970 mtspr pmc3_gp,r28 ; Restore PMC3
2971 mtspr pmc4_gp,r29 ; Restore PMC4
2972 lwz r28,savepmc+16(r31)
2973 lwz r29,savepmc+20(r31)
2974 mtspr pmc5_gp,r28 ; Restore PMC5
2975 mtspr pmc6_gp,r29 ; Restore PMC6
2976 lwz r28,savepmc+24(r31)
2977 lwz r29,savepmc+28(r31)
2978 mtspr pmc7_gp,r28 ; Restore PMC7
2979 mtspr pmc8_gp,r29 ; Restore PMC8
2980 ld r28,savemmcr1(r31)
2981 ld r29,savemmcr2(r31)
2982 mtspr mmcr1_gp,r28 ; Restore MMCR1
2983 mtspr mmcra_gp,r29 ; Restore MMCRA
2984 ld r28,savemmcr0(r31)
2985
2986 mtspr mmcr0_gp,r28 ; Restore MMCR0
2987
2988 noPerfMonRestore64:
2989 mfsprg r30,0 ; Get per_proc
2990 lwz r28,savecr(r31) ; Get CR to restore
2991 ld r29,savexer(r31) ; Get XER to restore
2992 mtcr r28 ; Restore the CR
2993 ld r28,savelr(r31) ; Get LR to restore
2994 mtxer r29 ; Restore the XER
2995 ld r29,savectr(r31) ; Get the CTR to restore
2996 mtlr r28 ; Restore the LR
2997 ld r28,saver30(r31) ; Get R30
2998 mtctr r29 ; Restore the CTR
2999 ld r29,saver31(r31) ; Get R31
3000 mtspr hsprg0,r28 ; Save R30 for later
3001 ld r28,saver28(r31) ; Restore R28
3002 mtsprg 3,r29 ; Save R31 for later
3003 ld r29,saver29(r31) ; Restore R29
3004
3005 lwz r31,pfAvailable(r30) ; Get the feature flags
3006 lwz r30,UAW(r30) ; Get the User Assist Word
3007 mtsprg 2,r31 ; Set the feature flags
3008 mfsprg r31,3 ; Restore R31
3009 mtsprg 3,r30 ; Set the UAW
3010 mfspr r30,hsprg0 ; Restore R30
3011
3012 rfid ; Click heels three times and think very hard that there is no place like home...
3013
3014
3015
3016 /*
3017 * exception_exit(savearea *)
3018 *
3019 *
3020 * ENTRY : IR and/or DR and/or interruptions can be on
3021 * R3 points to the virtual address of a savearea
3022 */
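;
;			Viewed from C this is roughly (an assumed prototype, not declared here):
;
;				void exception_exit(struct savearea *sv);	/* resumes the context in sv */
;
;			The code turns translation off, converts the savearea pointer to physical with
;			the SACvrswap mask, and then joins EatRupt (or Redrive if the savearea asks
;			for it) to restore the saved context.
;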
3023
3024 .align 5
3025 .globl EXT(exception_exit)
3026
3027 LEXT(exception_exit)
3028
3029 mfsprg r29,2 ; Get feature flags
3030 mr r31,r3 ; Get the savearea in the right register
3031 mtcrf 0x04,r29 ; Set the features
3032 li r0,1 ; Get this just in case
3033 mtcrf 0x02,r29 ; Set the features
3034 			lis		r30,hi16(MASK(MSR_VEC)|MASK(MSR_FP)|MASK(MSR_ME))	; Set up the MSR we will use throughout. Note that ME comes on here if MCK
3035 rlwinm r4,r3,0,0,19 ; Round down to savearea block base
3036 lis r1,hi16(SAVredrive) ; Get redrive request
3037 mfsprg r2,0 ; Get the per_proc block
3038 ori r30,r30,lo16(MASK(MSR_VEC)|MASK(MSR_FP)|MASK(MSR_ME)) ; Rest of MSR
3039 bt++ pf64Bitb,eeSixtyFour ; We are 64-bit...
3040
3041 lwz r4,SACvrswap+4(r4) ; Get the virtual to real translation
3042
3043 bt pfNoMSRirb,eeNoMSR ; No MSR...
3044
3045 mtmsr r30 ; Translation and all off
3046 isync ; Toss prefetch
3047 b eeNoMSRx
3048
3049 .align 5
3050
3051 eeSixtyFour:
3052 ld r4,SACvrswap(r4) ; Get the virtual to real translation
3053 rldimi r30,r0,63,MSR_SF_BIT ; Set SF bit (bit 0)
3054 mtmsrd r30 ; Set 64-bit mode, turn off EE, DR, and IR
3055 isync ; Toss prefetch
3056 b eeNoMSRx
3057
3058 .align 5
3059
3060 eeNoMSR: li r0,loadMSR ; Get the MSR setter SC
3061 mr r3,r30 ; Get new MSR
3062 sc ; Set it
3063
3064 eeNoMSRx: xor r31,r31,r4 ; Convert the savearea to physical addressing
3065 lwz r4,SAVflags(r31) ; Pick up the flags
3066 mr r13,r31 ; Put savearea here also
3067
3068 #if INSTRUMENT
3069 mfspr r5,pmc1 ; INSTRUMENT - saveinstr[8] - stamp exception exit
3070 stw r5,0x6100+(8*16)+0x0(0) ; INSTRUMENT - Save it
3071 mfspr r5,pmc2 ; INSTRUMENT - Get stamp
3072 stw r5,0x6100+(8*16)+0x4(0) ; INSTRUMENT - Save it
3073 mfspr r5,pmc3 ; INSTRUMENT - Get stamp
3074 stw r5,0x6100+(8*16)+0x8(0) ; INSTRUMENT - Save it
3075 mfspr r5,pmc4 ; INSTRUMENT - Get stamp
3076 stw r5,0x6100+(8*16)+0xC(0) ; INSTRUMENT - Save it
3077 #endif
3078
3079
3080 and. r0,r4,r1 ; Check if redrive requested
3081
3082 dcbt br0,r2 ; We will need this in just a sec
3083
3084 beq+ EatRupt ; No redrive, just exit...
3085
3086 lwz r11,saveexception(r13) ; Restore exception code
3087 b Redrive ; Redrive the exception...
3088
3089
3090
3091 .align 12 ; Force page alignment
3092
3093 .globl EXT(ExceptionVectorsEnd)
3094 EXT(ExceptionVectorsEnd): /* Used if relocating the exception vectors */
3095
3096
3097
3098
3099 ;
3100 ; Here is where we keep the low memory globals
3101 ;
3102
3103 . = 0x5000
3104 .globl EXT(lowGlo)
3105
3106 EXT(lowGlo):
3107
3108 .ascii "Hagfish " ; 5000 Unique eyecatcher
3109 .long 0 ; 5008 Zero
3110 .long 0 ; 500C Zero cont...
3111 .long EXT(per_proc_info) ; 5010 pointer to per_procs
3112 .long 0 ;
3113 .long 0 ; 5018 reserved
3114 .long 0 ; 501C reserved
3115 .long 0 ; 5020 reserved
3116 .long 0 ; 5024 reserved
3117 .long 0 ; 5028 reserved
3118 .long 0 ; 502C reserved
3119 .long 0 ; 5030 reserved
3120 .long 0 ; 5034 reserved
3121 .long 0 ; 5038 reserved
3122 .long 0 ; 503C reserved
3123 .long 0 ; 5040 reserved
3124 .long 0 ; 5044 reserved
3125 .long 0 ; 5048 reserved
3126 .long 0 ; 504C reserved
3127 .long 0 ; 5050 reserved
3128 .long 0 ; 5054 reserved
3129 .long 0 ; 5058 reserved
3130 .long 0 ; 505C reserved
3131 .long 0 ; 5060 reserved
3132 .long 0 ; 5064 reserved
3133 .long 0 ; 5068 reserved
3134 .long 0 ; 506C reserved
3135 .long 0 ; 5070 reserved
3136 .long 0 ; 5074 reserved
3137 .long 0 ; 5078 reserved
3138 .long 0 ; 507C reserved
3139
3140 .globl EXT(trcWork)
3141 EXT(trcWork):
3142 .long 0 ; 5080 The next trace entry to use
3143 #if DEBUG
3144 .long 0xFFFFFFFF ; 5084 All enabled
3145 #else
3146 .long 0x00000000 ; 5084 All disabled on non-debug systems
3147 #endif
3148 .long 0 ; 5088 Start of the trace table
3149 .long 0 ; 508C End (wrap point) of the trace
3150 .long 0 ; 5090 Saved mask while in debugger
3151 .long 0 ; 5094 Size of trace table (1 - 256 pages)
3152 .long 0 ; 5098 traceGas[0]
3153 .long 0 ; 509C traceGas[1]
3154
3155 .long 0 ; 50A0 reserved
3156 .long 0 ; 50A4 reserved
3157 .long 0 ; 50A8 reserved
3158 .long 0 ; 50AC reserved
3159 .long 0 ; 50B0 reserved
3160 .long 0 ; 50B4 reserved
3161 .long 0 ; 50B8 reserved
3162 .long 0 ; 50BC reserved
3163 .long 0 ; 50C0 reserved
3164 .long 0 ; 50C4 reserved
3165 .long 0 ; 50C8 reserved
3166 .long 0 ; 50CC reserved
3167 .long 0 ; 50D0 reserved
3168 .long 0 ; 50D4 reserved
3169 .long 0 ; 50D8 reserved
3170 .long 0 ; 50DC reserved
3171 .long 0 ; 50E0 reserved
3172 .long 0 ; 50E4 reserved
3173 .long 0 ; 50E8 reserved
3174 .long 0 ; 50EC reserved
3175 .long 0 ; 50F0 reserved
3176 .long 0 ; 50F4 reserved
3177 .long 0 ; 50F8 reserved
3178 .long 0 ; 50FC reserved
3179
3180 .globl EXT(saveanchor)
3181
3182 EXT(saveanchor): ; 5100 saveanchor
3183 .set .,.+SVsize
3184
3185 .long 0 ; 5140 reserved
3186 .long 0 ; 5144 reserved
3187 .long 0 ; 5148 reserved
3188 .long 0 ; 514C reserved
3189 .long 0 ; 5150 reserved
3190 .long 0 ; 5154 reserved
3191 .long 0 ; 5158 reserved
3192 .long 0 ; 515C reserved
3193 .long 0 ; 5160 reserved
3194 .long 0 ; 5164 reserved
3195 .long 0 ; 5168 reserved
3196 .long 0 ; 516C reserved
3197 .long 0 ; 5170 reserved
3198 .long 0 ; 5174 reserved
3199 .long 0 ; 5178 reserved
3200 .long 0 ; 517C reserved
3201
3202 .long 0 ; 5180 tlbieLock
3203
3204 .long 0 ; 5184 reserved
3205 .long 0 ; 5188 reserved
3206 .long 0 ; 518C reserved
3207 .long 0 ; 5190 reserved
3208 .long 0 ; 5194 reserved
3209 .long 0 ; 5198 reserved
3210 .long 0 ; 519C reserved
3211 .long 0 ; 51A0 reserved
3212 .long 0 ; 51A4 reserved
3213 .long 0 ; 51A8 reserved
3214 .long 0 ; 51AC reserved
3215 .long 0 ; 51B0 reserved
3216 .long 0 ; 51B4 reserved
3217 .long 0 ; 51B8 reserved
3218 .long 0 ; 51BC reserved
3219 .long 0 ; 51C0 reserved
3220 .long 0 ; 51C4 reserved
3221 .long 0 ; 51C8 reserved
3222 .long 0 ; 51CC reserved
3223 .long 0 ; 51D0 reserved
3224 .long 0 ; 51D4 reserved
3225 .long 0 ; 51D8 reserved
3226 .long 0 ; 51DC reserved
3227 .long 0 ; 51E0 reserved
3228 .long 0 ; 51E4 reserved
3229 .long 0 ; 51E8 reserved
3230 .long 0 ; 51EC reserved
3231 .long 0 ; 51F0 reserved
3232 .long 0 ; 51F4 reserved
3233 .long 0 ; 51F8 reserved
3234 .long 0 ; 51FC reserved
3235
3236 .globl EXT(dgWork)
3237
3238 EXT(dgWork):
3239
3240 .long 0 ; 5200 dgLock
3241 .long 0 ; 5204 dgFlags
3242 .long 0 ; 5208 dgMisc0
3243 .long 0 ; 520C dgMisc1
3244 .long 0 ; 5210 dgMisc2
3245 .long 0 ; 5214 dgMisc3
3246 .long 0 ; 5218 dgMisc4
3247 .long 0 ; 521C dgMisc5
3248
3249 .long 0 ; 5220 reserved
3250 .long 0 ; 5224 reserved
3251 .long 0 ; 5228 reserved
3252 .long 0 ; 522C reserved
3253 .long 0 ; 5230 reserved
3254 .long 0 ; 5234 reserved
3255 .long 0 ; 5238 reserved
3256 .long 0 ; 523C reserved
3257 .long 0 ; 5240 reserved
3258 .long 0 ; 5244 reserved
3259 .long 0 ; 5248 reserved
3260 .long 0 ; 524C reserved
3261 .long 0 ; 5250 reserved
3262 .long 0 ; 5254 reserved
3263 .long 0 ; 5258 reserved
3264 .long 0 ; 525C reserved
3265 .long 0 ; 5260 reserved
3266 .long 0 ; 5264 reserved
3267 .long 0 ; 5268 reserved
3268 .long 0 ; 526C reserved
3269 .long 0 ; 5270 reserved
3270 .long 0 ; 5274 reserved
3271 .long 0 ; 5278 reserved
3272 .long 0 ; 527C reserved
3273
3274 .long 0 ; 5280 reserved
3275 .long 0 ; 5284 reserved
3276 .long 0 ; 5288 reserved
3277 .long 0 ; 528C reserved
3278 .long 0 ; 5290 reserved
3279 .long 0 ; 5294 reserved
3280 .long 0 ; 5298 reserved
3281 .long 0 ; 529C reserved
3282 .long 0 ; 52A0 reserved
3283 .long 0 ; 52A4 reserved
3284 .long 0 ; 52A8 reserved
3285 .long 0 ; 52AC reserved
3286 .long 0 ; 52B0 reserved
3287 .long 0 ; 52B4 reserved
3288 .long 0 ; 52B8 reserved
3289 .long 0 ; 52BC reserved
3290 .long 0 ; 52C0 reserved
3291 .long 0 ; 52C4 reserved
3292 .long 0 ; 52C8 reserved
3293 .long 0 ; 52CC reserved
3294 .long 0 ; 52D0 reserved
3295 .long 0 ; 52D4 reserved
3296 .long 0 ; 52D8 reserved
3297 .long 0 ; 52DC reserved
3298 .long 0 ; 52E0 reserved
3299 .long 0 ; 52E4 reserved
3300 .long 0 ; 52E8 reserved
3301 .long 0 ; 52EC reserved
3302 .long 0 ; 52F0 reserved
3303 .long 0 ; 52F4 reserved
3304 .long 0 ; 52F8 reserved
3305 .long 0 ; 52FC reserved
3306
3307 .globl EXT(killresv)
3308 EXT(killresv):
3309
3310 .long 0 ; 5300 Used to kill reservations
3311 .long 0 ; 5304 Used to kill reservations
3312 .long 0 ; 5308 Used to kill reservations
3313 .long 0 ; 530C Used to kill reservations
3314 .long 0 ; 5310 Used to kill reservations
3315 .long 0 ; 5314 Used to kill reservations
3316 .long 0 ; 5318 Used to kill reservations
3317 .long 0 ; 531C Used to kill reservations
3318 .long 0 ; 5320 Used to kill reservations
3319 .long 0 ; 5324 Used to kill reservations
3320 .long 0 ; 5328 Used to kill reservations
3321 .long 0 ; 532C Used to kill reservations
3322 .long 0 ; 5330 Used to kill reservations
3323 .long 0 ; 5334 Used to kill reservations
3324 .long 0 ; 5338 Used to kill reservations
3325 .long 0 ; 533C Used to kill reservations
3326 .long 0 ; 5340 Used to kill reservations
3327 .long 0 ; 5344 Used to kill reservations
3328 .long 0 ; 5348 Used to kill reservations
3329 .long 0 ; 534C Used to kill reservations
3330 .long 0 ; 5350 Used to kill reservations
3331 .long 0 ; 5354 Used to kill reservations
3332 .long 0 ; 5358 Used to kill reservations
3333 .long 0 ; 535C Used to kill reservations
3334 .long 0 ; 5360 Used to kill reservations
3335 .long 0 ; 5364 Used to kill reservations
3336 .long 0 ; 5368 Used to kill reservations
3337 .long 0 ; 536C Used to kill reservations
3338 .long 0 ; 5370 Used to kill reservations
3339 .long 0 ; 5374 Used to kill reservations
3340 .long 0 ; 5378 Used to kill reservations
3341 .long 0 ; 537C Used to kill reservations
3342
3343 .long 0 ; 5380 reserved
3344 .long 0 ; 5384 reserved
3345 .long 0 ; 5388 reserved
3346 .long 0 ; 538C reserved
3347 .long 0 ; 5390 reserved
3348 .long 0 ; 5394 reserved
3349 .long 0 ; 5398 reserved
3350 .long 0 ; 539C reserved
3351 .long 0 ; 53A0 reserved
3352 .long 0 ; 53A4 reserved
3353 .long 0 ; 53A8 reserved
3354 .long 0 ; 53AC reserved
3355 .long 0 ; 53B0 reserved
3356 .long 0 ; 53B4 reserved
3357 .long 0 ; 53B8 reserved
3358 .long 0 ; 53BC reserved
3359 .long 0 ; 53C0 reserved
3360 .long 0 ; 53C4 reserved
3361 .long 0 ; 53C8 reserved
3362 .long 0 ; 53CC reserved
3363 .long 0 ; 53D0 reserved
3364 .long 0 ; 53D4 reserved
3365 .long 0 ; 53D8 reserved
3366 .long 0 ; 53DC reserved
3367 .long 0 ; 53E0 reserved
3368 .long 0 ; 53E4 reserved
3369 .long 0 ; 53E8 reserved
3370 .long 0 ; 53EC reserved
3371 .long 0 ; 53F0 reserved
3372 .long 0 ; 53F4 reserved
3373 .long 0 ; 53F8 reserved
3374 .long 0 ; 53FC reserved
3375
3376
3377 ;
3378 ; The "shared page" is used for low-level debugging
3379 ;
3380
3381 . = 0x6000
3382 .globl EXT(sharedPage)
3383
3384 EXT(sharedPage): ; Per processor data area
3385 .long 0xC24BC195 ; Comm Area validity value
3386 .long 0x87859393 ; Comm Area validity value
3387 .long 0xE681A2C8 ; Comm Area validity value
3388 .long 0x8599855A ; Comm Area validity value
3389 .long 0xD74BD296 ; Comm Area validity value
3390 .long 0x8388E681 ; Comm Area validity value
3391 .long 0xA2C88599 ; Comm Area validity value
3392 .short 0x855A ; Comm Area validity value
3393 .short 1 ; Comm Area version number
3394 .fill 1016*4,1,0 ; (filled with 0s)
3395
3396 .data
3397 .align ALIGN
3398 .globl EXT(exception_end)
3399 EXT(exception_end):
3400 .long EXT(ExceptionVectorsEnd) -EXT(ExceptionVectorsStart) /* phys fn */
3401
3402
3403