1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * @OSF_COPYRIGHT@
27 */
28
29 #include <assym.s>
30 #include <debug.h>
31 #include <cpus.h>
32 #include <db_machine_commands.h>
33
34 #include <mach_debug.h>
35 #include <ppc/asm.h>
36 #include <ppc/proc_reg.h>
37 #include <ppc/exception.h>
38 #include <ppc/Performance.h>
39 #include <ppc/savearea.h>
40 #include <mach/ppc/vm_param.h>
41
42 #define ESPDEBUG 0
43 #define INSTRUMENT 0
44
45 #define featAltivec 29
46 #define wasNapping 30
47
48 #define VECTOR_SEGMENT .section __VECTORS, __interrupts
49
50 VECTOR_SEGMENT
51
52
53 .globl EXT(ExceptionVectorsStart)
54
55 EXT(ExceptionVectorsStart): /* Used if relocating the exception vectors */
56 baseR: /* Used so we have more readable code */
57
58 ;
59 ; Handle system reset.
60 ; We do not ever expect a hard reset so we do not actually check.
61 ; When we come here, we check for a RESET_HANDLER_START (which means we are
62 ; waking up from sleep), a RESET_HANDLER_BUPOR (which is used for bring-up
63 ; when starting directly from a POR), and RESET_HANDLER_IGNORE (which means
64 ; ignore the interrupt).
65 ;
66 ; Some machines (so far, 32-bit guys) will always ignore a non-START interrupt.
67 ; The ones that do take it check whether the interrupt is to be ignored. This is
68 ; always the case until the previous reset is handled (i.e., we have exited
69 ; from the debugger).
70 ;
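;
;			A rough C view of the three-word record at ResetHandler below — a sketch
;			only, assuming the RESETHANDLER_* offsets in assym.s simply name these
;			fields:
;
;			    struct reset_handler_rec {
;			        unsigned int type;   // RESET_HANDLER_START / _BUPOR / _IGNORE / _NULL
;			        unsigned int call;   // physical address branched to on a START wakeup
;			        unsigned int arg;    // handed to the wakeup routine in r3
;			    };
;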
71 . = 0xf0
72 .globl EXT(ResetHandler)
73 EXT(ResetHandler):
74 .long 0x0
75 .long 0x0
76 .long 0x0
77
78 . = 0x100
79 .L_handler100:
80 mtsprg 2,r13 /* Save R13 */
81 mtsprg 3,r11 /* Save R11 */
82 lwz r13,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_TYPE)(br0) ; Get reset type
83 mfcr r11
84 cmpi cr0,r13,RESET_HANDLER_START
85 bne resetexc
86
87 li r11,RESET_HANDLER_NULL
88 stw r11,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_TYPE)(br0) ; Clear reset type
89
90 lwz r4,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_CALL)(br0)
91 lwz r3,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_ARG)(br0)
92 mtlr r4
93 blr
94
95 resetexc: cmplwi r13,RESET_HANDLER_BUPOR ; Special bring up POR sequence?
96 bne resetexc2 ; No...
97 lis r4,hi16(EXT(resetPOR)) ; Get POR code
98 ori r4,r4,lo16(EXT(resetPOR)) ; The rest
99 mtlr r4 ; Set it
100 blr ; Jump to it....
101
102 resetexc2: cmplwi cr1,r13,RESET_HANDLER_IGNORE ; Are we ignoring these? (Software debounce)
103
104 mfsprg r13,0 ; Get per_proc
105 lwz r13,pfAvailable(r13) ; Get the features
106 rlwinm. r13,r13,0,pf64Bitb,pf64Bitb ; Is this a 64-bit machine?
107 cror cr1_eq,cr0_eq,cr1_eq ; See if we want to take this
108 bne-- cr1,rxCont ; Yes, continue...
109 bne-- rxIg64 ; 64-bit path...
110
111 mtcr r11 ; Restore the CR
112 mfsprg r13,2 ; Restore R13
113 mfsprg r11,0 ; Get per_proc
114 lwz r11,pfAvailable(r11) ; Get the features
115 mtsprg 2,r11 ; Restore sprg2
116 mfsprg r11,3 ; Restore R11
117 rfi ; Return and ignore the reset
118
119 rxIg64: mtcr r11 ; Restore the CR
120 mfsprg r11,0 ; Get per_proc
121 mtspr hsprg0,r14 ; Save a register
122 lwz r14,UAW(r11) ; Get the User Assist Word
123 mfsprg r13,2 ; Restore R13
124 lwz r11,pfAvailable(r11) ; Get the features
125 mtsprg 2,r11 ; Restore sprg2
126 mfsprg r11,3 ; Restore R11
127 mtsprg 3,r14 ; Set the UAW in sprg3
128 mfspr r14,hsprg0 ; Restore R14
129 rfid ; Return and ignore the reset
130
131 rxCont: mtcr r11
132 li r11,RESET_HANDLER_IGNORE ; Get set to ignore
133 stw r11,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_TYPE)(br0) ; Start ignoring these
134 mfsprg r13,1 /* Get the exception save area */
135 li r11,T_RESET /* Set 'rupt code */
136 b .L_exception_entry /* Join common... */
137
138 /*
139 * Machine check
140 */
141
142 . = 0x200
143 .L_handler200:
144 mtsprg 2,r13 ; Save R13
145 mtsprg 3,r11 ; Save R11
146
147 .globl EXT(extPatchMCK)
148 LEXT(extPatchMCK) ; This is patched to a nop for 64-bit
149 b h200aaa ; Skip 64-bit code...
150
151 ;
152 ; Fall through here for 970 MCKs.
153 ;
154
155 li r11,1 ; ?
156 sldi r11,r11,32+3 ; ?
157 mfspr r13,hid4 ; ?
158 or r11,r11,r13 ; ?
159 sync
160 mtspr hid4,r11 ; ?
161 isync
162 li r11,1 ; ?
163 sldi r11,r11,32+8 ; ?
164 andc r13,r13,r11 ; ?
165 lis r11,0xE000 ; Get the unlikeliest ESID possible
166 sync
167 mtspr hid4,r13 ; ?
168 isync ; ?
169
170 srdi r11,r11,1 ; ?
171 slbie r11 ; ?
172 sync
173 isync
174
175 li r11,T_MACHINE_CHECK ; Set rupt code
176 b .L_exception_entry ; Join common...
177
178 ;
179 ; Preliminary checking of other MCKs
180 ;
181
182 h200aaa: mfsrr1 r11 ; Get the SRR1
183 mfcr r13 ; Save the CR
184
185 rlwinm. r11,r11,0,dcmck,dcmck ; ?
186 beq+ notDCache ; ?
187
188 sync
189 mfspr r11,msscr0 ; ?
190 dssall ; ?
191 sync
192 isync
193
194 oris r11,r11,hi16(dl1hwfm) ; ?
195 mtspr msscr0,r11 ; ?
196
197 rstbsy: mfspr r11,msscr0 ; ?
198
199 rlwinm. r11,r11,0,dl1hwf,dl1hwf ; ?
200 bne rstbsy ; ?
201
202 sync ; ?
203
204 mfsprg r11,0 ; Get the per_proc
205 mtcrf 255,r13 ; Restore CRs
206 lwz r13,hwMachineChecks(r11) ; Get old count
207 addi r13,r13,1 ; Count this one
208 stw r13,hwMachineChecks(r11) ; Set new count
209 lwz r11,pfAvailable(r11) ; Get the feature flags
210 mfsprg r13,2 ; Restore R13
211 mtsprg 2,r11 ; Set the feature flags
212 mfsprg r11,3 ; Restore R11
213 rfi ; Return
214
215 notDCache: mtcrf 255,r13 ; Restore CRs
216 li r11,T_MACHINE_CHECK ; Set rupt code
217 b .L_exception_entry ; Join common...
218
219
220 /*
221 * Data access - page fault, invalid memory rights for operation
222 */
223
224 . = 0x300
225 .L_handler300:
226 mtsprg 2,r13 /* Save R13 */
227 mtsprg 3,r11 /* Save R11 */
228 li r11,T_DATA_ACCESS /* Set 'rupt code */
229 b .L_exception_entry /* Join common... */
230
231
232 /*
233 * Data segment
234 */
235
236 . = 0x380
237 .L_handler380:
238 mtsprg 2,r13 ; Save R13
239 mtsprg 3,r11 ; Save R11
240 li r11,T_DATA_SEGMENT ; Set rupt code
241 b .L_exception_entry ; Join common...
242
243 /*
244 * Instruction access - as for data access
245 */
246
247 . = 0x400
248 .L_handler400:
249 mtsprg 2,r13 ; Save R13
250 mtsprg 3,r11 ; Save R11
251 li r11,T_INSTRUCTION_ACCESS ; Set rupt code
252 b .L_exception_entry ; Join common...
253
254 /*
255 * Instruction segment
256 */
257
258 . = 0x480
259 .L_handler480:
260 mtsprg 2,r13 ; Save R13
261 mtsprg 3,r11 ; Save R11
262 li r11,T_INSTRUCTION_SEGMENT ; Set rupt code
263 b .L_exception_entry ; Join common...
264
265 /*
266 * External interrupt
267 */
268
269 . = 0x500
270 .L_handler500:
271 mtsprg 2,r13 ; Save R13
272 mtsprg 3,r11 ; Save R11
273 li r11,T_INTERRUPT ; Set rupt code
274 b .L_exception_entry ; Join common...
275
276 /*
277 * Alignment - many reasons
278 */
279
280 . = 0x600
281 .L_handler600:
282 mtsprg 2,r13 /* Save R13 */
283 mtsprg 3,r11 /* Save R11 */
284 li r11,T_ALIGNMENT|T_FAM /* Set 'rupt code */
285 b .L_exception_entry /* Join common... */
286
287 /*
288 * Program - floating point exception, illegal inst, priv inst, user trap
289 */
290
291 . = 0x700
292 .L_handler700:
293 mtsprg 2,r13 /* Save R13 */
294 mtsprg 3,r11 /* Save R11 */
295
296 #if 0
297 mfsrr1 r13 ; (BRINGUP)
298 mfcr r11 ; (BRINGUP)
299 rlwinm. r13,r13,0,12,12 ; (BRINGUP)
300 crmove cr1_eq,cr0_eq ; (BRINGUP)
301 mfsrr1 r13 ; (BRINGUP)
302 rlwinm. r13,r13,0,MSR_PR_BIT,MSR_PR_BIT ; (BRINGUP)
303 crorc cr0_eq,cr1_eq,cr0_eq ; (BRINGUP)
304 bf-- cr0_eq,. ; (BRINGUP)
305 mtcrf 255,r11 ; (BRINGUP)
306 #endif
307
308 li r11,T_PROGRAM|T_FAM /* Set 'rupt code */
309 b .L_exception_entry /* Join common... */
310
311 /*
312 * Floating point disabled
313 */
314
315 . = 0x800
316 .L_handler800:
317 mtsprg 2,r13 /* Save R13 */
318 mtsprg 3,r11 /* Save R11 */
319 li r11,T_FP_UNAVAILABLE /* Set 'rupt code */
320 b .L_exception_entry /* Join common... */
321
322
323 /*
324 * Decrementer - DEC register has passed zero.
325 */
326
327 . = 0x900
328 .L_handler900:
329 mtsprg 2,r13 /* Save R13 */
330 mtsprg 3,r11 /* Save R11 */
331 li r11,T_DECREMENTER /* Set 'rupt code */
332 b .L_exception_entry /* Join common... */
333
334 /*
335 * I/O controller interface error - MACH does not use this
336 */
337
338 . = 0xA00
339 .L_handlerA00:
340 mtsprg 2,r13 /* Save R13 */
341 mtsprg 3,r11 /* Save R11 */
342 li r11,T_IO_ERROR /* Set 'rupt code */
343 b .L_exception_entry /* Join common... */
344
345 /*
346 * Reserved
347 */
348
349 . = 0xB00
350 .L_handlerB00:
351 mtsprg 2,r13 /* Save R13 */
352 mtsprg 3,r11 /* Save R11 */
353 li r11,T_RESERVED /* Set 'rupt code */
354 b .L_exception_entry /* Join common... */
355
356 ;
357 ; System call - generated by the sc instruction
358 ;
359 ; We handle the ultra-fast traps right here. They are:
360 ;
361 ; 0xFFFFFFFF - BlueBox only - MKIsPreemptiveTask
362 ; 0xFFFFFFFE - BlueBox only - kcNKIsPreemptiveTaskEnv
363 ; 0x00007FF2 - User state only - thread info
364 ; 0x00007FF3 - User state only - floating point / vector facility status
365 ; 0x00007FF4 - Kernel only - loadMSR - not used on 64-bit machines
366 ;
367 ; Note: none of these are handled if a virtual machine is running
368 ; Also, we treat SCs as kernel SCs if the RI bit is set
369 ;
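;
;			A minimal C sketch of the ultra-fast-trap filtering done in .L_handlerC00
;			below, assuming per_proc fields named after the UAW and spcFlags offsets
;			it reads (illustrative only, not the kernel's types):
;
;			    typedef struct { unsigned int UAW, spcFlags; } per_proc_sketch_t;
;
;			    /* returns the value the trap leaves in r3; 0 means "take the
;			       full system-call path instead" */
;			    unsigned int uft_dispatch(unsigned int r0, per_proc_sketch_t *pp)
;			    {
;			        if (r0 == 0x7FF2) return pp->UAW;       /* thread info      */
;			        if (r0 == 0x7FF3) return pp->spcFlags;  /* facility status  */
;			        return 0;                               /* normal syscall   */
;			    }
;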
370
371 . = 0xC00
372 .L_handlerC00:
373 mtsprg 3,r11 ; Save R11
374 mfsprg r11,2 ; Get the feature flags
375
376 mtsprg 2,r13 ; Save R13
377 rlwinm r11,r11,pf64Bitb-4,4,4 ; Get the 64-bit flag
378 mfsrr1 r13 ; Get SRR1 for loadMSR
379 rlwimi r11,r13,MSR_PR_BIT-5,5,5 ; Move the PR bit to bit 1
380 mfcr r13 ; Save the CR
381
382 mtcrf 0x40,r11 ; Get the top 3 CR bits to 64-bit, PR, sign
383
384 cmpwi r0,lo16(-3) ; Eliminate all negatives but -1 and -2
385 mfsprg r11,0 ; Get the per_proc
386 bf-- 5,uftInKern ; We came from the kernel...
387 ble-- notufp ; This is a mach call
388
389 lwz r11,spcFlags(r11) ; Pick up the special flags
390
391 cmpwi cr7,r0,lo16(-1) ; Is this a BlueBox call?
392 cmplwi cr2,r0,0x7FF2 ; Ultra fast path cthread info call?
393 cmplwi cr3,r0,0x7FF3 ; Ultra fast path facility status?
394 cror cr4_eq,cr2_eq,cr3_eq ; Is this one of the two ufts we handle here?
395
396 ble-- cr7,uftBBCall ; We think this is a BlueBox call...
397
398 rlwinm r11,r11,16,16,31 ; Extract spcFlags upper bits
399 andi. r11,r11,hi16(runningVM|FamVMena|FamVMmode)
400 cmpwi cr0,r11,hi16(runningVM|FamVMena|FamVMmode) ; Test in VM FAM
401 beq-- cr0,ufpVM ; fast paths running VM ...
402
403 bne-- cr4_eq,notufp ; Bail if this is not a uft...
404
405 ;
406 ; Handle normal user ultra-fast trap
407 ;
408
409 li r3,spcFlags ; Assume facility status - 0x7FF3
410
411 beq-- cr3,uftFacStat ; This is a facilities status call...
412
413 li r3,UAW ; This is really a thread info call - 0x7FF2
414
415 uftFacStat: mfsprg r11,0 ; Get the per_proc
416 lwzx r3,r11,r3 ; Get the UAW or spcFlags field
417
418 uftExit: bt++ 4,uftX64 ; Go do the 64-bit exit...
419
420 lwz r11,pfAvailable(r11) ; Get the feature flags
421 mtcrf 255,r13 ; Restore the CRs
422 mfsprg r13,2 ; Restore R13
423 mtsprg 2,r11 ; Set the feature flags
424 mfsprg r11,3 ; Restore R11
425
426 rfi ; Back to our guy...
427
428 uftX64: mtspr hsprg0,r14 ; Save a register
429
430 lwz r14,UAW(r11) ; Get the User Assist Word
431 lwz r11,pfAvailable(r11) ; Get the feature flags
432
433 mtcrf 255,r13 ; Restore the CRs
434
435 mfsprg r13,2 ; Restore R13
436 mtsprg 2,r11 ; Set the feature flags
437 mfsprg r11,3 ; Restore R11
438 mtsprg 3,r14 ; Set the UAW in sprg3
439 mfspr r14,hsprg0 ; Restore R14
440
441 rfid ; Back to our guy...
442
443 ;
444 ; Handle BlueBox ultra-fast trap
445 ;
446
447 uftBBCall: andi. r11,r11,bbNoMachSC|bbPreemptive ; Clear what we do not need
448 cmplwi r11,bbNoMachSC ; See if we are trapping syscalls
449 blt-- notufp ; No...
450
451 rlwimi r13,r11,bbPreemptivebit-cr0_eq,cr0_eq,cr0_eq ; Copy preemptive task flag into user cr0_eq
452
453 mfsprg r11,0 ; Get the per proc
454
455 beq++ cr7,uftExit ; For MKIsPreemptiveTask we are done...
456
457 lwz r0,ppbbTaskEnv(r11) ; Get the shadowed taskEnv from per_proc_area
458 b uftExit ; We are really all done now...
459
460 ; Kernel ultra-fast trap
461
462 uftInKern: cmplwi r0,0x7FF4 ; Ultra fast path loadMSR?
463 bne- notufp ; Someone is trying to cheat...
464
465 mtsrr1 r3 ; Set new MSR
466
467 b uftExit ; Go load the new MSR...
468
469 notufp: mtcrf 0xFF,r13 ; Restore the used CRs
470 li r11,T_SYSTEM_CALL|T_FAM ; Set interrupt code
471 b .L_exception_entry ; Join common...
472
473
474
475
476
477 /*
478 * Trace - generated by single stepping
479 * Performance monitor BE (branch enable) tracing/logging
480 * is also done here now. While this is permanently in the
481 * system, the impact is completely unnoticeable because this code is
482 * only executed when (a) a single step or branch exception is
483 * hit, (b) in the single-step debugger case there is already so much
484 * overhead that the few extra instructions for testing for BE
485 * are not even noticeable, and (c) the BE logging code is *only* run
486 * when it is enabled by the tool, which will not happen during
487 * normal system usage.
488 *
489 * Note that this trace is available only to user state so we do not
490 * need to set sprg2 before returning.
491 */
492
493 . = 0xD00
494 .L_handlerD00:
495 mtsprg 3,r11 ; Save R11
496 mfsprg r11,2 ; Get the feature flags
497 mtsprg 2,r13 ; Save R13
498 rlwinm r11,r11,pf64Bitb-4,4,4 ; Get the 64-bit flag
499 mfcr r13 ; Get the CR
500 mtcrf 0x40,r11 ; Set the CR
501 mfsrr1 r11 ; Get the old MSR
502 rlwinm. r11,r11,0,MSR_PR_BIT,MSR_PR_BIT ; Are we in supervisor state?
503
504 mfsprg r11,0 ; Get the per_proc
505 lhz r11,PP_CPU_FLAGS(r11) ; Get the flags
506 crmove cr1_eq,cr0_eq ; Remember if we are in supervisor state
507 rlwinm. r11,r11,0,traceBEb+16,traceBEb+16 ; Special trace enabled?
508 cror cr0_eq,cr0_eq,cr1_eq ; Is trace off or supervisor state?
509 bf-- cr0_eq,specbrtr ; No, we need to trace...
510
511 notspectr: mtcr r13 ; Restore CR
512 li r11,T_TRACE|T_FAM ; Set interrupt code
513 b .L_exception_entry ; Join common...
514
515 .align 5
516
517 ;
518 ; We are doing the special branch trace
519 ;
520
521 specbrtr: mfsprg r11,0 ; Get the per_proc area
522 bt++ 4,sbxx64a ; Jump if 64-bit...
523
524 stw r1,tempr0+4(r11) ; Save in a scratch area
525 stw r2,tempr1+4(r11) ; Save in a scratch area
526 stw r3,tempr2+4(r11) ; Save in a scratch area
527 b sbxx64b ; Skip...
528
529 sbxx64a: std r1,tempr0(r11) ; Save in a scratch area
530 std r2,tempr1(r11) ; Save in a scratch area
531 std r3,tempr2(r11) ; Save in a scratch area
532
533 sbxx64b: lis r2,hi16(EXT(pc_trace_buf)) ; Get the top of the buffer
534 lwz r3,spcTRp(r11) ; Pick up buffer position
535 ori r2,r2,lo16(EXT(pc_trace_buf)) ; Get the bottom of the buffer
536 cmplwi cr2,r3,4092 ; Set cr2_eq if we should take exception
537 mfsrr0 r1 ; Get the pc
538 stwx r1,r2,r3 ; Save it in the buffer
539 addi r3,r3,4 ; Point to the next slot
540 rlwinm r3,r3,0,20,31 ; Wrap the slot at one page
541 stw r3,spcTRp(r11) ; Save the new slot
542
543 bt++ 4,sbxx64c ; Jump if 64-bit...
544
545 lwz r1,tempr0+4(r11) ; Restore work register
546 lwz r2,tempr1+4(r11) ; Restore work register
547 lwz r3,tempr2+4(r11) ; Restore work register
548 beq cr2,notspectr ; Buffer filled, make a rupt...
549 b uftExit ; Go restore and leave...
550
551 sbxx64c: ld r1,tempr0(r11) ; Restore work register
552 ld r2,tempr1(r11) ; Restore work register
553 ld r3,tempr2(r11) ; Restore work register
554 beq cr2,notspectr ; Buffer filled, make a rupt...
555 b uftExit ; Go restore and leave...
556
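;
;			A rough C equivalent of the specbrtr logging above (identifiers are
;			illustrative): SRR0 is stored into the one-page pc_trace_buf ring at
;			offset spcTRp, the offset wraps at 4096 bytes, and hitting offset 4092
;			means the buffer just filled, so a real trace interruption is taken
;			instead of the fast exit.
;
;			    static int log_branch_pc(unsigned int pc_trace_buf[1024],
;			                             unsigned int *spcTRp, unsigned int srr0)
;			    {
;			        unsigned int pos = *spcTRp;
;			        pc_trace_buf[pos / 4] = srr0;     /* stwx into the buffer      */
;			        *spcTRp = (pos + 4) & 0xFFF;      /* advance, wrap at one page */
;			        return pos == 4092;               /* filled: raise a real rupt */
;			    }
;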
557 /*
558 * Floating point assist
559 */
560
561 . = 0xE00
562 .L_handlerE00:
563 mtsprg 2,r13 /* Save R13 */
564 mtsprg 3,r11 /* Save R11 */
565 li r11,T_FP_ASSIST /* Set 'rupt code */
566 b .L_exception_entry /* Join common... */
567
568
569 /*
570 * Performance monitor interruption
571 */
572
573 . = 0xF00
574 PMIhandler:
575 mtsprg 2,r13 /* Save R13 */
576 mtsprg 3,r11 /* Save R11 */
577 li r11,T_PERF_MON /* Set 'rupt code */
578 b .L_exception_entry /* Join common... */
579
580
581 /*
582 * VMX exception
583 */
584
585 . = 0xF20
586 VMXhandler:
587 mtsprg 2,r13 /* Save R13 */
588 mtsprg 3,r11 /* Save R11 */
589 li r11,T_VMX /* Set 'rupt code */
590 b .L_exception_entry /* Join common... */
591
592
593
594 ;
595 ; Instruction translation miss exception - not supported
596 ;
597
598 . = 0x1000
599 .L_handler1000:
600 mtsprg 2,r13 ; Save R13
601 mtsprg 3,r11 ; Save R11
602 li r11,T_INVALID_EXCP0 ; Set rupt code
603 b .L_exception_entry ; Join common...
604
605
606
607 ;
608 ; Data load translation miss exception - not supported
609 ;
610
611 . = 0x1100
612 .L_handler1100:
613 mtsprg 2,r13 ; Save R13
614 mtsprg 3,r11 ; Save R11
615 li r11,T_INVALID_EXCP1 ; Set rupt code
616 b .L_exception_entry ; Join common...
617
618
619
620 ;
621 ; Data store translation miss exception - not supported
622 ;
623
624 . = 0x1200
625 .L_handler1200:
626 mtsprg 2,r13 ; Save R13
627 mtsprg 3,r11 ; Save R11
628 li r11,T_INVALID_EXCP2 ; Set rupt code
629 b .L_exception_entry ; Join common...
630
631
632 /*
633 * Instruction address breakpoint
634 */
635
636 . = 0x1300
637 .L_handler1300:
638 mtsprg 2,r13 /* Save R13 */
639 mtsprg 3,r11 /* Save R11 */
640 li r11,T_INSTRUCTION_BKPT /* Set 'rupt code */
641 b .L_exception_entry /* Join common... */
642
643 /*
644 * System management interrupt
645 */
646
647 . = 0x1400
648 .L_handler1400:
649 mtsprg 2,r13 /* Save R13 */
650 mtsprg 3,r11 /* Save R11 */
651 li r11,T_SYSTEM_MANAGEMENT /* Set 'rupt code */
652 b .L_exception_entry /* Join common... */
653
654
655 /*
656 * Soft Patch
657 */
658
659 . = 0x1500
660 .L_handler1500:
661 mtsprg 2,r13 /* Save R13 */
662 mtsprg 3,r11 /* Save R11 */
663 li r11,T_SOFT_PATCH /* Set 'rupt code */
664 b .L_exception_entry /* Join common... */
665
666 ;
667 ; Altivec Java Mode Assist interrupt or Maintenance interrupt
668 ;
669
670 . = 0x1600
671 .L_handler1600:
672 mtsprg 2,r13 /* Save R13 */
673 mtsprg 3,r11 /* Save R11 */
674 li r11,T_ALTIVEC_ASSIST /* Set 'rupt code */
675 b .L_exception_entry /* Join common... */
676
677 ;
678 ; Altivec Java Mode Assist interrupt or Thermal interruption
679 ;
680
681 . = 0x1700
682 .L_handler1700:
683 mtsprg 2,r13 /* Save R13 */
684 mtsprg 3,r11 /* Save R11 */
685 li r11,T_THERMAL /* Set 'rupt code */
686 b .L_exception_entry /* Join common... */
687
688 ;
689 ; Thermal interruption - 64-bit
690 ;
691
692 . = 0x1800
693 .L_handler1800:
694 mtsprg 2,r13 /* Save R13 */
695 mtsprg 3,r11 /* Save R11 */
696 li r11,T_ARCHDEP0 /* Set 'rupt code */
697 b .L_exception_entry /* Join common... */
698
699 /*
700 * There is now a large gap of reserved traps
701 */
702
703 /*
704 * Instrumentation interruption
705 */
706
707 . = 0x2000
708 .L_handler2000:
709 mtsprg 2,r13 /* Save R13 */
710 mtsprg 3,r11 /* Save R11 */
711 li r11,T_INSTRUMENTATION /* Set 'rupt code */
712 b .L_exception_entry /* Join common... */
713
714 . = 0x2100
715
716 /*
717 * Filter Ultra Fast Path syscalls for VMM
718 */
719 ufpVM:
720 cmpwi cr2,r0,0x6004 ; Is it vmm_dispatch
721 bne cr2,notufp ; Exit If not
722 cmpwi cr5,r3,kvmmResumeGuest ; Compare r3 with kvmmResumeGuest
723 cmpwi cr2,r3,kvmmSetGuestRegister ; Compare r3 with kvmmSetGuestRegister
724 cror cr1_eq,cr5_lt,cr2_gt ; Set true if out of VMM Fast syscall range
725 bt- cr1_eq,notufp ; Exit if out of range
726 b EXT(vmm_ufp) ; Ultra Fast Path syscall
727
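;
;			A hedged C sketch of the ufpVM filter above: only a vmm_dispatch call
;			(r0 == 0x6004) whose selector lies inside the
;			[kvmmResumeGuest, kvmmSetGuestRegister] range takes the ultra-fast path.
;			The bounds are parameters here because their values live in the
;			vmachmon headers.
;
;			    static int vmm_ufp_eligible(unsigned int r0, int r3,
;			                                int kvmmResumeGuest,
;			                                int kvmmSetGuestRegister)
;			    {
;			        return r0 == 0x6004
;			            && r3 >= kvmmResumeGuest
;			            && r3 <= kvmmSetGuestRegister;
;			    }
;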
728 /*
729 * .L_exception_entry(type)
730 *
731 * This is the common exception handling routine called by any
732 * type of system exception.
733 *
734 * ENTRY: via a system exception handler, thus interrupts off, VM off.
735 * r13 has been saved in sprg2 and r11 in sprg3; r11 now contains
736 * a number representing the exception's origin
737 *
738 */
739
740 .data
741 .align ALIGN
742 .globl EXT(exception_entry)
743 EXT(exception_entry):
744 .long .L_exception_entry-EXT(ExceptionVectorsStart) /* phys addr of fn */
745
746 VECTOR_SEGMENT
747 .align 5
748
749 .L_exception_entry:
750
751 /*
752 *
753 * Here we will save off a mess of registers, the special ones and R0-R12. We use the DCBZ
754 * instruction to clear and allocate a line in the cache. This way we won't take any cache
755 * misses, so these stores won't take all that long. The exception is the first line, because
756 * we can't do a DCBZ if the L1 D-cache is off. We skip the DCBZs for the remaining lines
757 * as well if the cache is off.
758 *
759 * Note that if we are attempting to sleep (as opposed to nap or doze) all interruptions
760 * are ignored.
761 */
762
763
764 .globl EXT(extPatch32)
765
766
767 LEXT(extPatch32)
768 b extEntry64 ; Go do 64-bit (patched out for 32-bit)
769 mfsprg r13,0 ; Load per_proc
770 lwz r13,next_savearea+4(r13) ; Get the exception save area
771 stw r0,saver0+4(r13) ; Save register 0
772 stw r1,saver1+4(r13) ; Save register 1
773
774 mfspr r1,hid0 ; Get HID0
775 mfcr r0 ; Save the whole CR
776
777 mtcrf 0x20,r1 ; Get set to test for sleep
778 cror doze,doze,nap ; Remember if we are napping
779 bf sleep,notsleep ; Skip if we are not trying to sleep
780
781 mtcrf 0x20,r0 ; Restore the CR
782 lwz r0,saver0+4(r13) ; Restore R0
783 lwz r1,saver1+4(r13) ; Restore R1
784 mfsprg r13,0 ; Get the per_proc
785 lwz r11,pfAvailable(r13) ; Get back the feature flags
786 mfsprg r13,2 ; Restore R13
787 mtsprg 2,r11 ; Set sprg2 to the features
788 mfsprg r11,3 ; Restore R11
789 rfi ; Jump back into sleep code...
790 .long 0 ; Leave these here please...
791 .long 0
792 .long 0
793 .long 0
794 .long 0
795 .long 0
796 .long 0
797 .long 0
798
799
800 ;
801 ; This is the 32-bit context saving stuff
802 ;
803
804 .align 5
805
806 notsleep: stw r2,saver2+4(r13) ; Save this one
807 bf doze,notspdo ; Skip the next if we are not napping/dozing...
808 rlwinm r2,r1,0,nap+1,doze-1 ; Clear any possible nap and doze bits
809 mtspr hid0,r2 ; Clear the nap/doze bits
810 notspdo:
811
812 #if INSTRUMENT
813 mfspr r2,pmc1 ; INSTRUMENT - saveinstr[0] - Take earliest possible stamp
814 stw r2,0x6100+(0x00*16)+0x0(0) ; INSTRUMENT - Save it
815 mfspr r2,pmc2 ; INSTRUMENT - Get stamp
816 stw r2,0x6100+(0x00*16)+0x4(0) ; INSTRUMENT - Save it
817 mfspr r2,pmc3 ; INSTRUMENT - Get stamp
818 stw r2,0x6100+(0x00*16)+0x8(0) ; INSTRUMENT - Save it
819 mfspr r2,pmc4 ; INSTRUMENT - Get stamp
820 stw r2,0x6100+(0x00*16)+0xC(0) ; INSTRUMENT - Save it
821 #endif
822
823 la r1,saver4(r13) ; Point to the next line in case we need it
824 crmove wasNapping,doze ; Remember if we were napping
825 mfsprg r2,0 ; Get the per_proc area
826 dcbz 0,r1 ; allocate r4-r7 32-byte line in cache
827
828 ;
829 ; Remember, we are setting up CR6 with feature flags
830 ;
831 andi. r1,r11,T_FAM ; Check FAM bit
832
833 stw r3,saver3+4(r13) ; Save this one
834 stw r4,saver4+4(r13) ; Save this one
835 andc r11,r11,r1 ; Clear FAM bit
836 beq+ noFAM ; Is it FAM intercept
837 mfsrr1 r3 ; Load srr1
838 rlwinm. r3,r3,0,MSR_PR_BIT,MSR_PR_BIT ; Are we trapping from supervisor state?
839 beq+ noFAM ; From supervisor state
840 lwz r1,spcFlags(r2) ; Load spcFlags
841 rlwinm r1,r1,1+FamVMmodebit,30,31 ; Extract FamVMenabit and FamVMmodebit
842 cmpwi cr0,r1,2 ; Check FamVMena set without FamVMmode
843 bne+ noFAM ; Can this context be FAM intercept
844 lwz r4,FAMintercept(r2) ; Load exceptions mask to intercept
845 srwi r1,r11,2 ; divide r11 by 4
846 lis r3,0x8000 ; Set r3 to 0x80000000
847 srw r1,r3,r1 ; Set bit for current exception
848 and. r1,r1,r4 ; And current exception with the intercept mask
849 beq+ noFAM ; Is it FAM intercept
850 b EXT(vmm_fam_exc)
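;
;			The FAM test above, restated as a hedged C sketch (the bit masks and
;			field values are passed in rather than hard-coded, since FamVMena,
;			FamVMmode and the per_proc layout come from the exception headers):
;			a user-state exception goes to vmm_fam_exc only when FamVMena is set
;			without FamVMmode and this exception's bit is in the FAMintercept mask.
;
;			    static int fam_intercepts(unsigned int spcFlags, unsigned int FAMintercept,
;			                              unsigned int t_code, int from_user,
;			                              unsigned int FamVMena, unsigned int FamVMmode)
;			    {
;			        if (!from_user) return 0;
;			        if ((spcFlags & (FamVMena | FamVMmode)) != FamVMena) return 0;
;			        return (FAMintercept & (0x80000000u >> (t_code / 4))) != 0;
;			    }
;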
851 noFAM:
852 lwz r1,pfAvailable(r2) ; Get the CPU features flags
853 la r3,saver8(r13) ; Point to line with r8-r11
854 mtcrf 0xE2,r1 ; Put the features flags (that we care about) in the CR
855 dcbz 0,r3 ; allocate r8-r11 32-byte line in cache
856 la r3,saver12(r13) ; point to r12-r15 line
857 lis r4,hi16(MASK(MSR_VEC)|MASK(MSR_FP)|MASK(MSR_ME)) ; Set up the MSR we will use throughout. Note that ME comes on here if MCK
858 stw r6,saver6+4(r13) ; Save this one
859 ori r4,r4,lo16(MASK(MSR_VEC)|MASK(MSR_FP)|MASK(MSR_ME)) ; Rest of MSR
860 stw r8,saver8+4(r13) ; Save this one
861 crmove featAltivec,pfAltivecb ; Set the Altivec flag
862 mtmsr r4 ; Set MSR
863 isync
864 mfsrr0 r6 ; Get the interruption SRR0
865 la r8,savesrr0(r13) ; point to line with SRR0, SRR1, CR, XER, and LR
866 dcbz 0,r3 ; allocate r12-r15 32-byte line in cache
867 la r3,saver16(r13) ; point to next line
868 dcbz 0,r8 ; allocate 32-byte line with SRR0, SRR1, CR, XER, and LR
869 stw r7,saver7+4(r13) ; Save this one
870 lhz r8,PP_CPU_FLAGS(r2) ; Get the flags
871 mfsrr1 r7 ; Get the interrupt SRR1
872 rlwinm r8,r8,(((31-MSR_BE_BIT)+(traceBEb+16+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Set BE bit if special trace is on
873 stw r6,savesrr0+4(r13) ; Save the SRR0
874 rlwinm r6,r7,(((31-MSR_BE_BIT)+(MSR_PR_BIT+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Move PR bit to BE bit
875 stw r5,saver5+4(r13) ; Save this one
876 and r8,r6,r8 ; Remove BE bit only if problem state and special tracing on
877 mfsprg r6,2 ; Get interrupt time R13
878 mtsprg 2,r1 ; Set the feature flags
879 andc r7,r7,r8 ; Clear BE bit if special trace is on and PR is set
880 mfsprg r8,3 ; Get rupt time R11
881 stw r7,savesrr1+4(r13) ; Save SRR1
882 stw r8,saver11+4(r13) ; Save rupt time R11
883 stw r6,saver13+4(r13) ; Save rupt R13
884 dcbz 0,r3 ; allocate 32-byte line with r16-r19
885 la r3,saver20(r13) ; point to next line
886
887 getTB: mftbu r6 ; Get the upper timebase
888 mftb r7 ; Get the lower timebase
889 mftbu r8 ; Get the upper one again
890 cmplw r6,r8 ; Did the top tick?
891 bne- getTB ; Yeah, need to get it again...
892
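;
;			The getTB loop above is the standard 32-bit trick for reading a 64-bit
;			timebase atomically: re-read the upper half and retry if it ticked
;			between the two reads. A hedged C/inline-asm rendering of the same idea:
;
;			    static inline unsigned long long read_timebase(void)
;			    {
;			        unsigned int hi, lo, hi2;
;			        do {
;			            __asm__ volatile("mftbu %0" : "=r"(hi));
;			            __asm__ volatile("mftb  %0" : "=r"(lo));
;			            __asm__ volatile("mftbu %0" : "=r"(hi2));
;			        } while (hi != hi2);              /* upper half ticked: retry */
;			        return ((unsigned long long)hi << 32) | lo;
;			    }
;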
893 #if INSTRUMENT
894 mfspr r6,pmc1 ; INSTRUMENT - saveinstr[1] - Save halfway context save stamp
895 stw r6,0x6100+(0x01*16)+0x0(0) ; INSTRUMENT - Save it
896 mfspr r6,pmc2 ; INSTRUMENT - Get stamp
897 stw r6,0x6100+(0x01*16)+0x4(0) ; INSTRUMENT - Save it
898 mfspr r6,pmc3 ; INSTRUMENT - Get stamp
899 stw r6,0x6100+(0x01*16)+0x8(0) ; INSTRUMENT - Save it
900 mfspr r6,pmc4 ; INSTRUMENT - Get stamp
901 stw r6,0x6100+(0x01*16)+0xC(0) ; INSTRUMENT - Save it
902 #endif
903
904 stw r8,ruptStamp(r2) ; Save the top of time stamp
905 stw r8,SAVtime(r13) ; Save the top of time stamp
906 stw r7,ruptStamp+4(r2) ; Save the bottom of time stamp
907 stw r7,SAVtime+4(r13) ; Save the bottom of time stamp
908
909 dcbz 0,r3 ; allocate 32-byte line with r20-r23
910 stw r9,saver9+4(r13) ; Save this one
911
912 stw r10,saver10+4(r13) ; Save this one
913 mflr r4 ; Get the LR
914 mfxer r10 ; Get the XER
915
916 bf+ wasNapping,notNapping ; Skip if not waking up from nap...
917
918 lwz r6,napStamp+4(r2) ; Pick up low order nap stamp
919 lis r3,hi16(EXT(machine_idle_ret)) ; Get high part of nap/doze return
920 lwz r5,napStamp(r2) ; and high order
921 subfc r7,r6,r7 ; Subtract low stamp from now
922 lwz r6,napTotal+4(r2) ; Pick up low total
923 subfe r5,r5,r8 ; Subtract high stamp and borrow from now
924 lwz r8,napTotal(r2) ; Pick up the high total
925 addc r6,r6,r7 ; Add low to total
926 ori r3,r3,lo16(EXT(machine_idle_ret)) ; Get low part of nap/doze return
927 adde r8,r8,r5 ; Add high and carry to total
928 stw r6,napTotal+4(r2) ; Save the low total
929 stw r8,napTotal(r2) ; Save the high total
930 stw r3,savesrr0+4(r13) ; Modify to return to nap/doze exit
931
932 rlwinm. r3,r1,0,pfSlowNapb,pfSlowNapb ; Should HID1 be restored?
933 beq notInSlowNap
934
935 lwz r3,pfHID1(r2) ; Get saved HID1 value
936 mtspr hid1,r3 ; Restore HID1
937
938 notInSlowNap:
939 rlwinm. r3,r1,0,pfNoL2PFNapb,pfNoL2PFNapb ; Should MSSCR0 be restored?
940 beq notNapping
941
942 lwz r3,pfMSSCR0(r2) ; Get saved MSSCR0 value
943 mtspr msscr0,r3 ; Restore MSSCR0
944 sync
945 isync
946
947 notNapping: stw r12,saver12+4(r13) ; Save this one
948
949 stw r14,saver14+4(r13) ; Save this one
950 stw r15,saver15+4(r13) ; Save this one
951 la r14,saver24(r13) ; Point to the next block to save into
952 mfctr r6 ; Get the CTR
953 stw r16,saver16+4(r13) ; Save this one
954 la r15,savectr(r13) ; point to line with CTR, DAR, DSISR, Exception code, and VRSAVE
955 stw r4,savelr+4(r13) ; Save rupt LR
956
957 dcbz 0,r14 ; allocate 32-byte line with r24-r27
958 la r16,saver28(r13) ; point to line with r28-r31
959 dcbz 0,r15 ; allocate line with CTR, DAR, DSISR, Exception code, and VRSAVE
960 stw r17,saver17+4(r13) ; Save this one
961 stw r18,saver18+4(r13) ; Save this one
962 stw r6,savectr+4(r13) ; Save rupt CTR
963 stw r0,savecr(r13) ; Save rupt CR
964 stw r19,saver19+4(r13) ; Save this one
965 mfdar r6 ; Get the rupt DAR
966 stw r20,saver20+4(r13) ; Save this one
967 dcbz 0,r16 ; allocate 32-byte line with r28-r31
968
969 stw r21,saver21+4(r13) ; Save this one
970 lwz r21,spcFlags(r2) ; Get the special flags from per_proc
971 stw r10,savexer+4(r13) ; Save the rupt XER
972 stw r30,saver30+4(r13) ; Save this one
973 lhz r30,pfrptdProc(r2) ; Get the reported processor type
974 stw r31,saver31+4(r13) ; Save this one
975 stw r22,saver22+4(r13) ; Save this one
976 stw r23,saver23+4(r13) ; Save this one
977 stw r24,saver24+4(r13) ; Save this one
978 stw r25,saver25+4(r13) ; Save this one
979 mfdsisr r7 ; Get the rupt DSISR
980 stw r26,saver26+4(r13) ; Save this one
981 stw r27,saver27+4(r13) ; Save this one
982 andis. r21,r21,hi16(perfMonitor) ; Is the performance monitor enabled?
983 stw r28,saver28+4(r13) ; Save this one
984 cmpwi cr1, r30,CPU_SUBTYPE_POWERPC_750 ; G3?
985 la r27,savevscr(r13) ; point to 32-byte line with VSCR and FPSCR
986 cmpwi cr2,r30,CPU_SUBTYPE_POWERPC_7400 ; This guy?
987 stw r29,saver29+4(r13) ; Save R29
988 stw r6,savedar+4(r13) ; Save the rupt DAR
989 li r10,savepmc ; Point to pmc savearea
990
991 beq+ noPerfMonSave32 ; No perfmon on here...
992
993 dcbz r10,r13 ; Clear first part of pmc area
994 li r10,savepmc+0x20 ; Point to pmc savearea second part
995 li r22,0 ; r22: zero
996 dcbz r10,r13 ; Clear second part of pmc area
997
998 beq cr1,perfMonSave32_750 ; This is a G3...
999
1000 beq cr2,perfMonSave32_7400 ; Regular olde G4...
1001
1002 mfspr r24,pmc5 ; Here for a 7450
1003 mfspr r25,pmc6
1004 stw r24,savepmc+16(r13) ; Save PMC5
1005 stw r25,savepmc+20(r13) ; Save PMC6
1006 mtspr pmc5,r22 ; Leave PMC5 clear
1007 mtspr pmc6,r22 ; Leave PMC6 clear
1008
1009 perfMonSave32_7400:
1010 mfspr r25,mmcr2
1011 stw r25,savemmcr2+4(r13) ; Save MMCR2
1012 mtspr mmcr2,r22 ; Leave MMCR2 clear
1013
1014 perfMonSave32_750:
1015 mfspr r23,mmcr0
1016 mfspr r24,mmcr1
1017 stw r23,savemmcr0+4(r13) ; Save MMCR0
1018 stw r24,savemmcr1+4(r13) ; Save MMCR1
1019 mtspr mmcr0,r22 ; Leave MMCR0 clear
1020 mtspr mmcr1,r22 ; Leave MMCR1 clear
1021 mfspr r23,pmc1
1022 mfspr r24,pmc2
1023 mfspr r25,pmc3
1024 mfspr r26,pmc4
1025 stw r23,savepmc+0(r13) ; Save PMC1
1026 stw r24,savepmc+4(r13) ; Save PMC2
1027 stw r25,savepmc+8(r13) ; Save PMC3
1028 stw r26,savepmc+12(r13) ; Save PMC4
1029 mtspr pmc1,r22 ; Leave PMC1 clear
1030 mtspr pmc2,r22 ; Leave PMC2 clear
1031 mtspr pmc3,r22 ; Leave PMC3 clear
1032 mtspr pmc4,r22 ; Leave PMC4 clear
1033
1034 noPerfMonSave32:
1035 dcbz 0,r27 ; allocate line with VSCR and FPSCR
1036
1037 stw r7,savedsisr(r13) ; Save the rupt code DSISR
1038 stw r11,saveexception(r13) ; Save the exception code
1039
1040
1041 ;
1042 ; Everything is saved at this point, except for the FPRs and VMX registers.
1043 ; Time for us to get a new savearea and then trace interrupt if it is enabled.
1044 ;
1045
1046 lwz r25,traceMask(0) ; Get the trace mask
1047 li r0,SAVgeneral ; Get the savearea type value
1048 lhz r19,PP_CPU_NUMBER(r2) ; Get the logical processor number
1049 rlwinm r22,r11,30,0,31 ; Divide interrupt code by 2
1050 stb r0,SAVflags+2(r13) ; Mark valid context
1051 addi r22,r22,10 ; Adjust code so we shift into CR5
1052 li r23,trcWork ; Get the trace work area address
1053 rlwnm r7,r25,r22,22,22 ; Set CR5_EQ bit position to 0 if tracing allowed
1054 li r26,0x8 ; Get start of cpu mask
1055 srw r26,r26,r19 ; Get bit position of cpu number
1056 mtcrf 0x04,r7 ; Set CR5 to show trace or not
1057 and. r26,r26,r25 ; See if we trace this cpu
1058 crandc cr5_eq,cr5_eq,cr0_eq ; Turn off tracing if cpu is disabled
1059 ;
1060 ; At this point, we can take another exception and lose nothing.
1061 ;
1062
1063 #if INSTRUMENT
1064 mfspr r26,pmc1 ; INSTRUMENT - saveinstr[2] - Take stamp after save is done
1065 stw r26,0x6100+(0x02*16)+0x0(0) ; INSTRUMENT - Save it
1066 mfspr r26,pmc2 ; INSTRUMENT - Get stamp
1067 stw r26,0x6100+(0x02*16)+0x4(0) ; INSTRUMENT - Save it
1068 mfspr r26,pmc3 ; INSTRUMENT - Get stamp
1069 stw r26,0x6100+(0x02*16)+0x8(0) ; INSTRUMENT - Save it
1070 mfspr r26,pmc4 ; INSTRUMENT - Get stamp
1071 stw r26,0x6100+(0x02*16)+0xC(0) ; INSTRUMENT - Save it
1072 #endif
1073
1074 bne+ cr5,xcp32xit ; Skip all of this if no tracing here...
1075
1076 ;
1077 ; We select a trace entry using a compare and swap on the next entry field.
1078 ; Since we do not lock the actual trace buffer, there is a potential that
1079 ; another processor could wrap and trash our entry. Who cares?
1080 ;
1081
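;
;			The lwarx/stwcx. pair below is a lock-free fetch-and-bump of the
;			next-entry pointer; a hedged C11 sketch of the same idea (identifiers
;			are illustrative, not the kernel's):
;
;			    #include <stdatomic.h>
;
;			    unsigned int grab_trace_slot(_Atomic unsigned int *next,
;			                                 unsigned int start, unsigned int end,
;			                                 unsigned int entry_size)
;			    {
;			        unsigned int cur, nxt;
;			        do {
;			            cur = atomic_load(next);
;			            nxt = cur + entry_size;
;			            if (nxt == end) nxt = start;          /* wrap to table start */
;			        } while (!atomic_compare_exchange_weak(next, &cur, nxt));
;			        return cur;                               /* our (unlocked) slot */
;			    }
;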
1082 lwz r25,traceStart(0) ; Get the start of trace table
1083 lwz r26,traceEnd(0) ; Get end of trace table
1084
1085 trcsel: lwarx r20,0,r23 ; Get and reserve the next slot to allocate
1086
1087 addi r22,r20,LTR_size ; Point to the next trace entry
1088 cmplw r22,r26 ; Do we need to wrap the trace table?
1089 bne+ gotTrcEnt ; No wrap, we got us a trace entry...
1090
1091 mr r22,r25 ; Wrap back to start
1092
1093 gotTrcEnt: stwcx. r22,0,r23 ; Try to update the current pointer
1094 bne- trcsel ; Collision, try again...
1095
1096 #if ESPDEBUG
1097 dcbf 0,r23 ; Force to memory
1098 sync
1099 #endif
1100
1101 dcbz 0,r20 ; Clear and allocate first trace line
1102
1103 ;
1104 ; Let us cut that trace entry now.
1105 ;
1106
1107 lwz r16,ruptStamp(r2) ; Get top of time base
1108 lwz r17,ruptStamp+4(r2) ; Get the bottom of time stamp
1109
1110 li r14,32 ; Offset to second line
1111
1112 lwz r0,saver0+4(r13) ; Get back interrupt time R0
1113 lwz r1,saver1+4(r13) ; Get back interrupt time R1
1114 lwz r8,savecr(r13) ; Get the CR value
1115
1116 dcbz r14,r20 ; Zap the second line
1117
1118 sth r19,LTR_cpu(r20) ; Stash the cpu number
1119 li r14,64 ; Offset to third line
1120 sth r11,LTR_excpt(r20) ; Save the exception type
1121 lwz r7,saver2+4(r13) ; Get back interrupt time R2
1122 lwz r3,saver3+4(r13) ; Restore this one
1123
1124 dcbz r14,r20 ; Zap the third line
1125
1126 mfdsisr r9 ; Get the DSISR
1127 li r14,96 ; Offset to fourth line
1128 stw r16,LTR_timeHi(r20) ; Set the upper part of TB
1129 stw r17,LTR_timeLo(r20) ; Set the lower part of TB
1130 lwz r10,savelr+4(r13) ; Get the LR
1131 mfsrr0 r17 ; Get SRR0 back, it is still good
1132
1133 dcbz r14,r20 ; Zap the fourth line
1134 lwz r4,saver4+4(r13) ; Restore this one
1135 lwz r5,saver5+4(r13) ; Restore this one
1136 mfsrr1 r18 ; SRR1 is still good in here
1137
1138 stw r8,LTR_cr(r20) ; Save the CR
1139 lwz r6,saver6+4(r13) ; Get R6
1140 mfdar r16 ; Get this back
1141 stw r9,LTR_dsisr(r20) ; Save the DSISR
1142 stw r17,LTR_srr0+4(r20) ; Save the SRR0
1143
1144 stw r18,LTR_srr1+4(r20) ; Save the SRR1
1145 stw r16,LTR_dar+4(r20) ; Save the DAR
1146 mfctr r17 ; Get the CTR (still good in register)
1147 stw r13,LTR_save+4(r20) ; Save the savearea
1148 stw r10,LTR_lr+4(r20) ; Save the LR
1149
1150 stw r17,LTR_ctr+4(r20) ; Save off the CTR
1151 stw r0,LTR_r0+4(r20) ; Save off register 0
1152 stw r1,LTR_r1+4(r20) ; Save off register 1
1153 stw r7,LTR_r2+4(r20) ; Save off register 2
1154
1155
1156 stw r3,LTR_r3+4(r20) ; Save off register 3
1157 stw r4,LTR_r4+4(r20) ; Save off register 4
1158 stw r5,LTR_r5+4(r20) ; Save off register 5
1159 stw r6,LTR_r6+4(r20) ; Save off register 6
1160
1161 #if ESPDEBUG
1162 addi r17,r20,32 ; Second line
1163 addi r16,r20,64 ; Third line
1164 dcbst br0,r20 ; Force to memory
1165 dcbst br0,r17 ; Force to memory
1166 addi r17,r17,32 ; Fourth line
1167 dcbst br0,r16 ; Force to memory
1168 dcbst br0,r17 ; Force to memory
1169
1170 sync ; Make sure it all goes
1171 #endif
1172 xcp32xit: mr r14,r11 ; Save the interrupt code across the call
1173 bl EXT(save_get_phys_32) ; Grab a savearea
1174 mfsprg r2,0 ; Get the per_proc info
1175 li r10,emfp0 ; Point to floating point save
1176 mr r11,r14 ; Get the exception code back
1177 dcbz r10,r2 ; Clear for speed
1178 stw r3,next_savearea+4(r2) ; Store the savearea for the next rupt
1179
1180 #if INSTRUMENT
1181 mfspr r4,pmc1 ; INSTRUMENT - saveinstr[3] - Take stamp after next savearea
1182 stw r4,0x6100+(0x03*16)+0x0(0) ; INSTRUMENT - Save it
1183 mfspr r4,pmc2 ; INSTRUMENT - Get stamp
1184 stw r4,0x6100+(0x03*16)+0x4(0) ; INSTRUMENT - Save it
1185 mfspr r4,pmc3 ; INSTRUMENT - Get stamp
1186 stw r4,0x6100+(0x03*16)+0x8(0) ; INSTRUMENT - Save it
1187 mfspr r4,pmc4 ; INSTRUMENT - Get stamp
1188 stw r4,0x6100+(0x03*16)+0xC(0) ; INSTRUMENT - Save it
1189 #endif
1190 b xcpCommon ; Go join the common interrupt processing...
1191
1192 ;
1193 ;
1194 ; This is the 64-bit context saving stuff
1195 ;
1196
1197 .align 5
1198
1199 extEntry64: mfsprg r13,0 ; Load per_proc
1200 ld r13,next_savearea(r13) ; Get the exception save area
1201 std r0,saver0(r13) ; Save register 0
1202 lis r0,hi16(MASK(MSR_VEC)|MASK(MSR_FP)|MASK(MSR_ME)) ; Set up the MSR we will use throughout. Note that ME comes on here if MCK
1203 std r1,saver1(r13) ; Save register 1
1204 ori r1,r0,lo16(MASK(MSR_VEC)|MASK(MSR_FP)|MASK(MSR_ME)) ; Rest of MSR
1205 lis r0,0x0010 ; Get rupt code transform validity mask
1206 mtmsr r1 ; Set MSR
1207 isync
1208
1209 ori r0,r0,0x0200 ; Get rupt code transform validity mask
1210 std r2,saver2(r13) ; Save this one
1211 lis r1,0x00F0 ; Top half of xform XOR
1212 rlwinm r2,r11,29,27,31 ; Get high 5 bits of rupt code
1213 std r3,saver3(r13) ; Save this one
1214 slw r0,r0,r2 ; Move transform validity bit to bit 0
1215 std r4,saver4(r13) ; Save this one
1216 std r5,saver5(r13) ; Save this one
1217 ori r1,r1,0x04EC ; Bottom half of xform XOR
1218 mfxer r5 ; Save the XER because we are about to muck with it
1219 rlwinm r4,r11,1,27,28 ; Get bottom of interrupt code * 8
1220 lis r3,hi16(dozem|napm) ; Get the nap and doze bits
1221 srawi r0,r0,31 ; Get 0xFFFFFFFF of xform valid, 0 otherwise
1222 rlwnm r4,r1,r4,24,31 ; Extract the xform XOR
1223 li r1,saver16 ; Point to the next line
1224 and r4,r4,r0 ; Only keep transform if we are to use it
1225 li r2,lgKillResv ; Point to the killing field
1226 mfcr r0 ; Save the CR
1227 stwcx. r2,0,r2 ; Kill any pending reservation
1228 dcbz128 r1,r13 ; Blow away the line
1229 sldi r3,r3,32 ; Position it
1230 mfspr r1,hid0 ; Get HID0
1231 andc r3,r1,r3 ; Clear nap and doze
1232 xor r11,r11,r4 ; Transform 970 rupt code to standard keeping FAM bit
1233 cmpld r3,r1 ; See if nap and/or doze was on
1234 std r6,saver6(r13) ; Save this one
1235 mfsprg r2,0 ; Get the per_proc area
1236 la r6,savesrr0(r13) ; point to line with SRR0, SRR1, CR, XER, and LR
1237 beq++ eE64NoNap ; No nap here, skip all this...
1238
1239 sync ; Make sure we are clean
1240 mtspr hid0,r3 ; Set the updated hid0
1241 mfspr r1,hid0 ; Yes, this is silly, keep it here
1242 mfspr r1,hid0 ; Yes, this is a duplicate, keep it here
1243 mfspr r1,hid0 ; Yes, this is a duplicate, keep it here
1244 mfspr r1,hid0 ; Yes, this is a duplicate, keep it here
1245 mfspr r1,hid0 ; Yes, this is a duplicate, keep it here
1246 mfspr r1,hid0 ; Yes, this is a duplicate, keep it here
1247
1248 eE64NoNap: crnot wasNapping,cr0_eq ; Remember if we were napping
1249 andi. r1,r11,T_FAM ; Check FAM bit
1250 beq++ eEnoFAM ; Is it FAM intercept
1251 mfsrr1 r3 ; Load srr1
1252 andc r11,r11,r1 ; Clear FAM bit
1253 rlwinm. r3,r3,0,MSR_PR_BIT,MSR_PR_BIT ; Are we trapping from supervisor state?
1254 beq+ eEnoFAM ; From supervisor state
1255 lwz r1,spcFlags(r2) ; Load spcFlags
1256 rlwinm r1,r1,1+FamVMmodebit,30,31 ; Extract FamVMenabit and FamVMmodebit
1257 cmpwi cr0,r1,2 ; Check FamVMena set without FamVMmode
1258 bne++ eEnoFAM ; Can this context be FAM intercept
1259 lwz r4,FAMintercept(r2) ; Load exceptions mask to intercept
1260 li r3,0 ; Clear
1261 srwi r1,r11,2 ; divide r11 by 4
1262 oris r3,r3,0x8000 ; Set r3 to 0x80000000
1263 srw r1,r3,r1 ; Set bit for current exception
1264 and. r1,r1,r4 ; And current exception with the intercept mask
1265 beq++ eEnoFAM ; Is it FAM intercept
1266 b EXT(vmm_fam_exc)
1267
1268 .align 5
1269
1270 eEnoFAM: lwz r1,pfAvailable(r2) ; Get the CPU features flags
1271 dcbz128 0,r6 ; allocate 128-byte line with SRR0, SRR1, CR, XER, and LR
1272
1273 ;
1274 ; Remember, we are setting up CR6 with feature flags
1275 ;
1276 std r7,saver7(r13) ; Save this one
1277 mtcrf 0x80,r1 ; Put the features flags (that we care about) in the CR
1278 std r8,saver8(r13) ; Save this one
1279 mtcrf 0x40,r1 ; Put the features flags (that we care about) in the CR
1280 mfsrr0 r6 ; Get the interruption SRR0
1281 lhz r8,PP_CPU_FLAGS(r2) ; Get the flags
1282 mtcrf 0x20,r1 ; Put the features flags (that we care about) in the CR
1283 mfsrr1 r7 ; Get the interrupt SRR1
1284 rlwinm r8,r8,(((31-MSR_BE_BIT)+(traceBEb+16+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Set BE bit if special trace is on
1285 std r6,savesrr0(r13) ; Save the SRR0
1286 mtcrf 0x02,r1 ; Put the features flags (that we care about) in the CR
1287 rlwinm r6,r7,(((31-MSR_BE_BIT)+(MSR_PR_BIT+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Move PR bit to BE bit
1288 and r8,r6,r8 ; Remove BE bit only if problem state and special tracing on
1289 std r9,saver9(r13) ; Save this one
1290 andc r7,r7,r8 ; Clear BE bit if special trace is on and PR is set
1291 crmove featAltivec,pfAltivecb ; Set the Altivec flag
1292 std r7,savesrr1(r13) ; Save SRR1
1293 mfsprg r9,3 ; Get rupt time R11
1294 std r10,saver10(r13) ; Save this one
1295 mfsprg r6,2 ; Get interrupt time R13
1296 std r9,saver11(r13) ; Save rupt time R11
1297 mtsprg 2,r1 ; Set the feature flags
1298 std r12,saver12(r13) ; Save this one
1299 mflr r4 ; Get the LR
1300 mftb r7 ; Get the timebase
1301 std r6,saver13(r13) ; Save rupt R13
1302 std r7,ruptStamp(r2) ; Save the time stamp
1303 std r7,SAVtime(r13) ; Save the time stamp
1304
1305 bf++ wasNapping,notNappingSF ; Skip if not waking up from nap...
1306
1307 ld r6,napStamp(r2) ; Pick up nap stamp
1308 lis r3,hi16(EXT(machine_idle_ret)) ; Get high part of nap/doze return
1309 sub r7,r7,r6 ; Subtract stamp from now
1310 ld r6,napTotal(r2) ; Pick up total
1311 add r6,r6,r7 ; Add low to total
1312 ori r3,r3,lo16(EXT(machine_idle_ret)) ; Get low part of nap/doze return
1313 std r6,napTotal(r2) ; Save the high total
1314 std r3,savesrr0(r13) ; Modify to return to nap/doze exit
1315
1316 notNappingSF:
1317 std r14,saver14(r13) ; Save this one
1318 std r15,saver15(r13) ; Save this one
1319 stw r0,savecr(r13) ; Save rupt CR
1320 mfctr r6 ; Get the CTR
1321 std r16,saver16(r13) ; Save this one
1322 std r4,savelr(r13) ; Save rupt LR
1323
1324 std r17,saver17(r13) ; Save this one
1325 li r7,savepmc ; Point to pmc area
1326 std r18,saver18(r13) ; Save this one
1327 lwz r17,spcFlags(r2) ; Get the special flags from per_proc
1328 std r6,savectr(r13) ; Save rupt CTR
1329 std r19,saver19(r13) ; Save this one
1330 mfdar r6 ; Get the rupt DAR
1331 std r20,saver20(r13) ; Save this one
1332
1333 dcbz128 r7,r13 ; Clear out the pmc spot
1334
1335 std r21,saver21(r13) ; Save this one
1336 std r5,savexer(r13) ; Save the rupt XER
1337 std r22,saver22(r13) ; Save this one
1338 std r23,saver23(r13) ; Save this one
1339 std r24,saver24(r13) ; Save this one
1340 std r25,saver25(r13) ; Save this one
1341 mfdsisr r7 ; Get the rupt DSISR
1342 std r26,saver26(r13) ; Save this one
1343 andis. r17,r17,hi16(perfMonitor) ; Is the performance monitor enabled?
1344 std r27,saver27(r13) ; Save this one
1345 li r10,emfp0 ; Point to floating point save
1346 std r28,saver28(r13) ; Save this one
1347 la r27,savevscr(r13) ; point to 32-byte line with VSCR and FPSCR
1348 std r29,saver29(r13) ; Save R29
1349 std r30,saver30(r13) ; Save this one
1350 std r31,saver31(r13) ; Save this one
1351 std r6,savedar(r13) ; Save the rupt DAR
1352 stw r7,savedsisr(r13) ; Save the rupt code DSISR
1353 stw r11,saveexception(r13) ; Save the exception code
1354
1355 beq++ noPerfMonSave64 ; Performance monitor not on...
1356
1357 li r22,0 ; r22: zero
1358
1359 mfspr r23,mmcr0_gp
1360 mfspr r24,mmcr1_gp
1361 mfspr r25,mmcra_gp
1362 std r23,savemmcr0(r13) ; Save MMCR0
1363 std r24,savemmcr1(r13) ; Save MMCR1
1364 std r25,savemmcr2(r13) ; Save MMCRA
1365 mtspr mmcr0_gp,r22 ; Leave MMCR0 clear
1366 mtspr mmcr1_gp,r22 ; Leave MMCR1 clear
1367 mtspr mmcra_gp,r22 ; Leave MMCRA clear
1368 mfspr r23,pmc1_gp
1369 mfspr r24,pmc2_gp
1370 mfspr r25,pmc3_gp
1371 mfspr r26,pmc4_gp
1372 stw r23,savepmc+0(r13) ; Save PMC1
1373 stw r24,savepmc+4(r13) ; Save PMC2
1374 stw r25,savepmc+8(r13) ; Save PMC3
1375 stw r26,savepmc+12(r13) ; Save PMC4
1376 mfspr r23,pmc5_gp
1377 mfspr r24,pmc6_gp
1378 mfspr r25,pmc7_gp
1379 mfspr r26,pmc8_gp
1380 stw r23,savepmc+16(r13) ; Save PMC5
1381 stw r24,savepmc+20(r13) ; Save PMC6
1382 stw r25,savepmc+24(r13) ; Save PMC7
1383 stw r26,savepmc+28(r13) ; Save PMC8
1384 mtspr pmc1_gp,r22 ; Leave PMC1 clear
1385 mtspr pmc2_gp,r22 ; Leave PMC2 clear
1386 mtspr pmc3_gp,r22 ; Leave PMC3 clear
1387 mtspr pmc4_gp,r22 ; Leave PMC4 clear
1388 mtspr pmc5_gp,r22 ; Leave PMC5 clear
1389 mtspr pmc6_gp,r22 ; Leave PMC6 clear
1390 mtspr pmc7_gp,r22 ; Leave PMC7 clear
1391 mtspr pmc8_gp,r22 ; Leave PMC8 clear
1392
1393 noPerfMonSave64:
1394
1395 ;
1396 ; Everything is saved at this point, except for the FPRs and VMX registers.
1397 ; Time for us to get a new savearea and then trace interrupt if it is enabled.
1398 ;
1399
1400 lwz r25,traceMask(0) ; Get the trace mask
1401 li r0,SAVgeneral ; Get the savearea type value
1402 lhz r19,PP_CPU_NUMBER(r2) ; Get the logical processor number
1403 stb r0,SAVflags+2(r13) ; Mark valid context
1404 ori r23,r23,lo16(EXT(trcWork)) ; Get the rest
1405 rlwinm r22,r11,30,0,31 ; Divide interrupt code by 2
1406 li r23,trcWork ; Get the trace work area address
1407 addi r22,r22,10 ; Adjust code so we shift into CR5
1408 li r26,0x8 ; Get start of cpu mask
1409 rlwnm r7,r25,r22,22,22 ; Set CR5_EQ bit position to 0 if tracing allowed
1410 srw r26,r26,r19 ; Get bit position of cpu number
1411 mtcrf 0x04,r7 ; Set CR5 to show trace or not
1412 and. r26,r26,r25 ; See if we trace this cpu
1413 crandc cr5_eq,cr5_eq,cr0_eq ; Turn off tracing if cpu is disabled
1414
1415 bne++ cr5,xcp64xit ; Skip all of this if no tracing here...
1416
1417 ;
1418 ; We select a trace entry using a compare and swap on the next entry field.
1419 ; Since we do not lock the actual trace buffer, there is a potential that
1420 ; another processor could wrap and trash our entry. Who cares?
1421 ;
1422
1423 lwz r25,traceStart(0) ; Get the start of trace table
1424 lwz r26,traceEnd(0) ; Get end of trace table
1425
1426 trcselSF: lwarx r20,0,r23 ; Get and reserve the next slot to allocate
1427
1428 addi r22,r20,LTR_size ; Point to the next trace entry
1429 cmplw r22,r26 ; Do we need to wrap the trace table?
1430 bne+ gotTrcEntSF ; No wrap, we got us a trace entry...
1431
1432 mr r22,r25 ; Wrap back to start
1433
1434 gotTrcEntSF:
1435 stwcx. r22,0,r23 ; Try to update the current pointer
1436 bne- trcselSF ; Collision, try again...
1437
1438 #if ESPDEBUG
1439 dcbf 0,r23 ; Force to memory
1440 sync
1441 #endif
1442
1443 ;
1444 ; Let us cut that trace entry now.
1445 ;
1446
1447 dcbz128 0,r20 ; Zap the trace entry
1448
1449 ld r16,ruptStamp(r2) ; Get top of time base
1450 ld r0,saver0(r13) ; Get back interrupt time R0 (we need this whether we trace or not)
1451 std r16,LTR_timeHi(r20) ; Set the upper part of TB
1452 ld r1,saver1(r13) ; Get back interrupt time R1
1453 ld r18,saver2(r13) ; Get back interrupt time R2
1454 std r0,LTR_r0(r20) ; Save off register 0
1455 ld r3,saver3(r13) ; Restore this one
1456 sth r19,LTR_cpu(r20) ; Stash the cpu number
1457 std r1,LTR_r1(r20) ; Save off register 1
1458 ld r4,saver4(r13) ; Restore this one
1459 std r18,LTR_r2(r20) ; Save off register 2
1460 ld r5,saver5(r13) ; Restore this one
1461 ld r6,saver6(r13) ; Get R6
1462 std r3,LTR_r3(r20) ; Save off register 3
1463 lwz r16,savecr(r13) ; Get the CR value
1464 std r4,LTR_r4(r20) ; Save off register 4
1465 mfsrr0 r17 ; Get SRR0 back, it is still good
1466 std r5,LTR_r5(r20) ; Save off register 5
1467 std r6,LTR_r6(r20) ; Save off register 6
1468 mfsrr1 r18 ; SRR1 is still good in here
1469 stw r16,LTR_cr(r20) ; Save the CR
1470 std r17,LTR_srr0(r20) ; Save the SRR0
1471 std r18,LTR_srr1(r20) ; Save the SRR1
1472
1473 mfdar r17 ; Get this back
1474 ld r16,savelr(r13) ; Get the LR
1475 std r17,LTR_dar(r20) ; Save the DAR
1476 mfctr r17 ; Get the CTR (still good in register)
1477 std r16,LTR_lr(r20) ; Save the LR
1478 std r17,LTR_ctr(r20) ; Save off the CTR
1479 mfdsisr r17 ; Get the DSISR
1480 std r13,LTR_save(r20) ; Save the savearea
1481 stw r17,LTR_dsisr(r20) ; Save the DSISR
1482 sth r11,LTR_excpt(r20) ; Save the exception type
1483
1484 #if ESPDEBUG
1485 dcbf 0,r20 ; Force to memory
1486 sync ; Make sure it all goes
1487 #endif
1488 xcp64xit: mr r14,r11 ; Save the interrupt code across the call
1489 bl EXT(save_get_phys_64) ; Grab a savearea
1490 mfsprg r2,0 ; Get the per_proc info
1491 li r10,emfp0 ; Point to floating point save
1492 mr r11,r14 ; Get the exception code back
1493 dcbz128 r10,r2 ; Clear for speed
1494 std r3,next_savearea(r2) ; Store the savearea for the next rupt
1495 b xcpCommon ; Go join the common interrupt processing...
1496
1497 ;
1498 ; All of the context is saved and a fresh savearea has been set up for
1499 ; the next interruption. From here on we can take an interrupt.
1500 ;
1501
1502 .align 5
1503
1504 xcpCommon:
1505
1506 ;
1507 ; Here we will save some floating point and vector status
1508 ; and we also set a clean default status for a new interrupt level.
1509 ; Note that we assume that emfp0 is on an Altivec boundary
1510 ; and that R10 points to it (as a displacement from R2).
1511 ;
1512 ; We need to save the FPSCR as if it is normal context.
1513 ; This is because pending exceptions will cause an exception even if
1514 ; FP is disabled. We need to clear the FPSCR when we first start running in the
1515 ; kernel.
1516 ;
1517
1518 stfd f0,emfp0(r2) ; Save FPR0
1519 stfd f1,emfp1(r2) ; Save FPR1
1520 li r19,0 ; Assume no Altivec
1521 mffs f0 ; Get the FPSCR
1522 lfd f1,Zero(0) ; Make a 0
1523 stfd f0,savefpscrpad(r13) ; Save the FPSCR
1524 li r9,0 ; Get set to clear VRSAVE
1525 mtfsf 0xFF,f1 ; Clear it
1526 addi r14,r10,16 ; Displacement to second vector register
1527 lfd f0,emfp0(r2) ; Restore FPR0
1528 la r28,savevscr(r13) ; Point to the status area
1529 lfd f1,emfp1(r2) ; Restore FPR1
1530
1531 bf featAltivec,noavec ; No Altivec on this CPU...
1532
1533 stvxl v0,r10,r2 ; Save a register
1534 stvxl v1,r14,r2 ; Save a second register
1535 mfspr r19,vrsave ; Get the VRSAVE register
1536 mfvscr v0 ; Get the vector status register
1537 vspltish v1,1 ; Turn on the non-Java bit and saturate
1538 stvxl v0,0,r28 ; Save the vector status
1539 vspltisw v0,1 ; Turn on the saturate bit
1540 vxor v1,v1,v0 ; Turn off saturate
1541 mtvscr v1 ; Set the non-java, no saturate status for new level
1542 mtspr vrsave,r9 ; Clear VRSAVE for each interrupt level
1543
1544 lvxl v0,r10,r2 ; Restore first work register
1545 lvxl v1,r14,r2 ; Restore second work register
1546
1547 noavec: stw r19,savevrsave(r13) ; Save the vector register usage flags
1548
1549 ;
1550 ; We are now done saving all of the context. Start filtering the interrupts.
1551 ; Note that a Redrive will count as an actual interrupt.
1552 ; Note also that we take a lot of system calls so we will start decode here.
1553 ;
1554
1555 Redrive:
1556
1557
1558 #if INSTRUMENT
1559 mfspr r20,pmc1 ; INSTRUMENT - saveinstr[4] - Take stamp before exception filter
1560 stw r20,0x6100+(0x04*16)+0x0(0) ; INSTRUMENT - Save it
1561 mfspr r20,pmc2 ; INSTRUMENT - Get stamp
1562 stw r20,0x6100+(0x04*16)+0x4(0) ; INSTRUMENT - Save it
1563 mfspr r20,pmc3 ; INSTRUMENT - Get stamp
1564 stw r20,0x6100+(0x04*16)+0x8(0) ; INSTRUMENT - Save it
1565 mfspr r20,pmc4 ; INSTRUMENT - Get stamp
1566 stw r20,0x6100+(0x04*16)+0xC(0) ; INSTRUMENT - Save it
1567 #endif
1568 lwz r22,SAVflags(r13) ; Pick up the flags
1569 lwz r0,saver0+4(r13) ; Get back interrupt time syscall number
1570 mfsprg r2,0 ; Restore per_proc
1571
1572 li r20,lo16(xcpTable) ; Point to the vector table (note: this must be in 1st 64k of physical memory)
1573 la r12,hwCounts(r2) ; Point to the exception count area
1574 rlwinm r22,r22,SAVredriveb+1,31,31 ; Get a 1 if we are redriving
1575 add r12,r12,r11 ; Point to the count
1576 lwzx r20,r20,r11 ; Get the interrupt handler
1577 lwz r25,0(r12) ; Get the old value
1578 lwz r23,hwRedrives(r2) ; Get the redrive count
1579 xori r24,r22,1 ; Get the NOT of the redrive
1580 mtctr r20 ; Point to the interrupt handler
1581 mtcrf 0x80,r0 ; Set our CR0 to the high nybble of possible syscall code
1582 add r25,r25,r24 ; Count this one if not a redrive
1583 add r23,r23,r24 ; Count this one if it is a redrive
1584 crandc cr0_lt,cr0_lt,cr0_gt ; See if we have R0 equal to 0b10xx...x
1585 stw r25,0(r12) ; Store it back
1586 stw r23,hwRedrives(r2) ; Save the redrive count
1587 bctr ; Go process the exception...
1588
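;
;			The Redrive filter above boils down to a table dispatch: the T_xxx code
;			is a byte offset into xcpTable (one word per entry), so the handler is
;			fetched with lwzx and entered through mtctr/bctr. A minimal C sketch of
;			that shape (table layout assumed, counters omitted):
;
;			    typedef void (*xcp_handler_t)(void);
;
;			    static void dispatch(xcp_handler_t table[], unsigned int t_code)
;			    {
;			        table[t_code / 4]();   /* lwzx picks the handler, bctr jumps */
;			    }
;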
1589
1590 ;
1591 ; Exception vector filter table
1592 ;
1593
1594 .align 7
1595
1596 xcpTable:
1597 .long EatRupt ; T_IN_VAIN
1598 .long PassUpTrap ; T_RESET
1599 .long MachineCheck ; T_MACHINE_CHECK
1600 .long EXT(handlePF) ; T_DATA_ACCESS
1601 .long EXT(handlePF) ; T_INSTRUCTION_ACCESS
1602 .long PassUpRupt ; T_INTERRUPT
1603 .long EXT(AlignAssist) ; T_ALIGNMENT
1604 .long EXT(Emulate) ; T_PROGRAM
1605 .long PassUpFPU ; T_FP_UNAVAILABLE
1606 .long PassUpRupt ; T_DECREMENTER
1607 .long PassUpTrap ; T_IO_ERROR
1608 .long PassUpTrap ; T_RESERVED
1609 .long xcpSyscall ; T_SYSTEM_CALL
1610 .long PassUpTrap ; T_TRACE
1611 .long PassUpTrap ; T_FP_ASSIST
1612 .long PassUpTrap ; T_PERF_MON
1613 .long PassUpVMX ; T_VMX
1614 .long PassUpTrap ; T_INVALID_EXCP0
1615 .long PassUpTrap ; T_INVALID_EXCP1
1616 .long PassUpTrap ; T_INVALID_EXCP2
1617 .long PassUpTrap ; T_INSTRUCTION_BKPT
1618 .long PassUpRupt ; T_SYSTEM_MANAGEMENT
1619 .long EXT(AltivecAssist) ; T_ALTIVEC_ASSIST
1620 .long PassUpRupt ; T_THERMAL
1621 .long PassUpTrap ; T_INVALID_EXCP5
1622 .long PassUpTrap ; T_INVALID_EXCP6
1623 .long PassUpTrap ; T_INVALID_EXCP7
1624 .long PassUpTrap ; T_INVALID_EXCP8
1625 .long PassUpTrap ; T_INVALID_EXCP9
1626 .long PassUpTrap ; T_INVALID_EXCP10
1627 .long PassUpTrap ; T_INVALID_EXCP11
1628 .long PassUpTrap ; T_INVALID_EXCP12
1629 .long PassUpTrap ; T_INVALID_EXCP13
1630
1631 .long PassUpTrap ; T_RUNMODE_TRACE
1632
1633 .long PassUpRupt ; T_SIGP
1634 .long PassUpTrap ; T_PREEMPT
1635 .long conswtch ; T_CSWITCH
1636 .long PassUpRupt ; T_SHUTDOWN
1637 .long PassUpAbend ; T_CHOKE
1638
1639 .long EXT(handleDSeg) ; T_DATA_SEGMENT
1640 .long EXT(handleISeg) ; T_INSTRUCTION_SEGMENT
1641
1642 .long WhoaBaby ; T_SOFT_PATCH
1643 .long WhoaBaby ; T_MAINTENANCE
1644 .long WhoaBaby ; T_INSTRUMENTATION
1645
1646 ;
1647 ; Just what the heck happened here????
1648 ;
1649
1650 .align 5
1651
1652 WhoaBaby: b . ; Open the hood and wait for help
1653
1654
1655 ;
1656 ; System call
1657 ;
1658
1659 .align 5
1660
1661 xcpSyscall: lis r20,hi16(EXT(shandler)) ; Assume this is a normal one, get handler address
1662 rlwinm r6,r0,1,0,31 ; Move sign bit to the end
1663 ori r20,r20,lo16(EXT(shandler)) ; Assume this is a normal one, get handler address
1664 bnl++ cr0,PassUp ; R0 not 0b10xxx...x, can not be any kind of magical system call, just pass it up...
1665 lwz r7,savesrr1+4(r13) ; Get the entering MSR (low half)
1666 lwz r1,dgFlags(0) ; Get the flags
1667 cmplwi cr2,r6,1 ; See if original R0 had the CutTrace request code in it
1668
1669 rlwinm. r7,r7,0,MSR_PR_BIT,MSR_PR_BIT ; Did we come from user state?
1670 beq++ FCisok ; From supervisor state...
1671
1672 rlwinm. r1,r1,0,enaUsrFCallb,enaUsrFCallb ; Are they valid?
1673 beq++ PassUp ; No, treat as a normal one...
1674
1675 FCisok: beq++ cr2,EatRupt ; This is a CutTrace system call, we are done with it...
1676
1677 ;
1678 ; Here is where we call the firmware. If it returns T_IN_VAIN, that means
1679 ; that it has handled the interruption. Remember: thou shalt not trash R13
1680 ; while you are away. Anything else is ok.
1681 ;
1682
1683 lwz r3,saver3+4(r13) ; Restore the first parameter
1684 b EXT(FirmwareCall) ; Go handle the firmware call....
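;
; Illustrative sketch (comments only): the special-syscall filter above in C-like
; pseudocode. sa is the savearea (R13); from_user and usr_fcalls_enabled are
; hypothetical stand-ins for the saved MSR[PR] bit and the enaUsrFCall bit in dgFlags.
;
;   if ((r0 & 0xC0000000) != 0x80000000) return PassUp();   // not a magic syscall code
;   if (from_user && !usr_fcalls_enabled) return PassUp();  // user firmware calls off
;   if (r0 == 0x80000000) return EatRupt();                 // CutTrace, already handled
;   r3 = sa->saver3;                                        // restore first parameter
;   FirmwareCall(r3);                   // must preserve R13; returns through FCReturn
;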
1685
1686 ;
1687 ; Here is where we return from the firmware call
1688 ;
1689
1690 .align 5
1691 .globl EXT(FCReturn)
1692
1693 LEXT(FCReturn)
1694 cmplwi r3,T_IN_VAIN ; Was it handled?
1695 beq+ EatRupt ; Interrupt was handled...
1696 mr r11,r3 ; Put the rupt code into the right register
1697 b Redrive ; Go through the filter again...
1698
1699
1700 ;
1701 ; Here is where we return from the PTE miss and segment exception handler
1702 ;
1703
1704 .align 5
1705 .globl EXT(PFSExit)
1706
1707 LEXT(PFSExit)
1708
1709 #if 0
1710 mfsprg r2,0 ; (BRINGUP)
1711 lwz r0,savedsisr(r13) ; (BRINGUP)
1712 andis. r0,r0,hi16(dsiAC) ; (BRINGUP)
1713 beq++ didnthit ; (BRINGUP)
1714 lwz r0,20(0) ; (BRINGUP)
1715 mr. r0,r0 ; (BRINGUP)
1716 bne-- didnthit ; (BRINGUP)
1717 #if 0
1718 li r0,1 ; (BRINGUP)
1719 stw r0,20(0) ; (BRINGUP)
1720 lis r0,hi16(Choke) ; (BRINGUP)
1721 ori r0,r0,lo16(Choke) ; (BRINGUP)
1722 sc ; (BRINGUP)
1723 #endif
1724
1725 lwz r4,savesrr0+4(r13) ; (BRINGUP)
1726 lwz r8,savesrr1+4(r13) ; (BRINGUP)
1727 lwz r6,savedar+4(r13) ; (BRINGUP)
1728 rlwinm. r0,r8,0,MSR_IR_BIT,MSR_IR_BIT ; (BRINGUP)
1729 mfmsr r9 ; (BRINGUP)
1730 ori r0,r9,lo16(MASK(MSR_DR)) ; (BRINGUP)
1731 beq-- hghg ; (BRINGUP)
1732 mtmsr r0 ; (BRINGUP)
1733 isync ; (BRINGUP)
1734
1735 hghg: lwz r5,0(r4) ; (BRINGUP)
1736 beq-- hghg1 ; (BRINGUP)
1737 mtmsr r9 ; (BRINGUP)
1738 isync ; (BRINGUP)
1739
1740 hghg1: rlwinm r7,r5,6,26,31 ; (BRINGUP)
1741 rlwinm r27,r5,14,24,28 ; (BRINGUP)
1742 addi r3,r13,saver0+4 ; (BRINGUP)
1743 lwzx r3,r3,r27 ; (BRINGUP)
1744
1745 #if 0
1746 lwz r27,patcharea+4(r2) ; (BRINGUP)
1747 mr. r3,r3 ; (BRINGUP)
1748 bne++ nbnbnb ; (BRINGUP)
1749 addi r27,r27,1 ; (BRINGUP)
1750 stw r27,patcharea+4(r2) ; (BRINGUP)
1751 nbnbnb:
1752 #endif
1753
1754 rlwinm. r28,r8,0,MSR_DR_BIT,MSR_DR_BIT ; (BRINGUP)
1755 rlwinm r27,r6,0,0,29 ; (BRINGUP)
1756 ori r28,r9,lo16(MASK(MSR_DR)) ; (BRINGUP)
1757 mfspr r10,dabr ; (BRINGUP)
1758 li r0,0 ; (BRINGUP)
1759 mtspr dabr,r0 ; (BRINGUP)
1760 cmplwi cr1,r7,31 ; (BRINGUP)
1761 beq-- qqq0 ; (BRINGUP)
1762 mtmsr r28 ; (BRINGUP)
1763 qqq0:
1764 isync ; (BRINGUP)
1765
1766 lwz r27,0(r27) ; (BRINGUP) - Get original value
1767
1768 bne cr1,qqq1 ; (BRINGUP)
1769
1770 rlwinm r5,r5,31,22,31 ; (BRINGUP)
1771 cmplwi cr1,r5,151 ; (BRINGUP)
1772 beq cr1,qqq3 ; (BRINGUP)
1773 cmplwi cr1,r5,407 ; (BRINGUP)
1774 beq cr1,qqq2 ; (BRINGUP)
1775 cmplwi cr1,r5,215 ; (BRINGUP)
1776 beq cr1,qqq0q ; (BRINGUP)
1777 cmplwi cr1,r5,1014 ; (BRINGUP)
1778 beq cr1,qqqm1 ; (BRINGUP)
1779
1780 lis r0,hi16(Choke) ; (BRINGUP)
1781 ori r0,r0,lo16(Choke) ; (BRINGUP)
1782 sc ; (BRINGUP)
1783
1784 qqqm1: rlwinm r7,r6,0,0,26 ; (BRINGUP)
1785 stw r0,0(r7) ; (BRINGUP)
1786 stw r0,4(r7) ; (BRINGUP)
1787 stw r0,8(r7) ; (BRINGUP)
1788 stw r0,12(r7) ; (BRINGUP)
1789 stw r0,16(r7) ; (BRINGUP)
1790 stw r0,20(r7) ; (BRINGUP)
1791 stw r0,24(r7) ; (BRINGUP)
1792 stw r0,28(r7) ; (BRINGUP)
1793 b qqq9
1794
1795 qqq1: cmplwi r7,38 ; (BRINGUP)
1796 bgt qqq2 ; (BRINGUP)
1797 blt qqq3 ; (BRINGUP)
1798
1799 qqq0q: stb r3,0(r6) ; (BRINGUP)
1800 b qqq9 ; (BRINGUP)
1801
1802 qqq2: sth r3,0(r6) ; (BRINGUP)
1803 b qqq9 ; (BRINGUP)
1804
1805 qqq3: stw r3,0(r6) ; (BRINGUP)
1806
1807 qqq9:
1808 #if 0
1809 rlwinm r7,r6,0,0,29 ; (BRINGUP)
1810 lwz r0,0(r7) ; (BRINGUP) - Get newest value
1811 #else
1812 lis r7,hi16(0x000792B8) ; (BRINGUP)
1813 ori r7,r7,lo16(0x000792B8) ; (BRINGUP)
1814 lwz r0,0(r7) ; (BRINGUP) - Get newest value
1815 #endif
1816 mtmsr r9 ; (BRINGUP)
1817 mtspr dabr,r10 ; (BRINGUP)
1818 isync ; (BRINGUP)
1819
1820 #if 0
1821 lwz r28,patcharea+12(r2) ; (BRINGUP)
1822 mr. r28,r28 ; (BRINGUP)
1823 bne++ qqq12 ; (BRINGUP)
1824 lis r28,0x4000 ; (BRINGUP)
1825
1826 qqq12: stw r27,0(r28) ; (BRINGUP)
1827 lwz r6,savedar+4(r13) ; (BRINGUP)
1828 stw r0,4(r28) ; (BRINGUP)
1829 stw r4,8(r28) ; (BRINGUP)
1830 stw r6,12(r28) ; (BRINGUP)
1831 addi r28,r28,16 ; (BRINGUP)
1832 mr. r3,r3 ; (BRINGUP)
1833 stw r28,patcharea+12(r2) ; (BRINGUP)
1834 lwz r10,patcharea+8(r2) ; (BRINGUP)
1835 lwz r0,patcharea+4(r2) ; (BRINGUP)
1836 #endif
1837
1838 #if 1
1839 stw r0,patcharea(r2) ; (BRINGUP)
1840 #endif
1841
1842 #if 0
1843 xor r28,r0,r27 ; (BRINGUP) - See how much it changed
1844 rlwinm r28,r28,24,24,31 ; (BRINGUP)
1845 cmplwi r28,1 ; (BRINGUP)
1846
1847 ble++ qqq10 ; (BRINGUP)
1848
1849 mr r7,r0 ; (BRINGUP)
1850 li r0,1 ; (BRINGUP)
1851 stw r0,20(0) ; (BRINGUP)
1852 lis r0,hi16(Choke) ; (BRINGUP)
1853 ori r0,r0,lo16(Choke) ; (BRINGUP)
1854 sc ; (BRINGUP)
1855 #endif
1856
1857
1858 qqq10: addi r4,r4,4 ; (BRINGUP)
1859 stw r4,savesrr0+4(r13) ; (BRINGUP)
1860
1861 li r11,T_IN_VAIN ; (BRINGUP)
1862 b EatRupt ; (BRINGUP)
1863
1864 didnthit: ; (BRINGUP)
1865 #endif
1866 #if 0
1867 lwz r0,20(0) ; (BRINGUP)
1868 mr. r0,r0 ; (BRINGUP)
1869 beq++ opopop ; (BRINGUP)
1870 li r0,0 ; (BRINGUP)
1871 stw r0,20(0) ; (BRINGUP)
1872 lis r0,hi16(Choke) ; (BRINGUP)
1873 ori r0,r0,lo16(Choke) ; (BRINGUP)
1874 sc ; (BRINGUP)
1875 opopop:
1876 #endif
1877 lwz r0,savesrr1+4(r13) ; Get the MSR in use at exception time
1878 cmplwi cr1,r11,T_IN_VAIN ; Was it handled?
1879 rlwinm. r4,r0,0,MSR_PR_BIT,MSR_PR_BIT ; Are we trapping from supervisor state?
1880 beq++ cr1,EatRupt ; Yeah, just blast back to the user...
1881 beq-- NoFamPf
1882 mfsprg r2,0 ; Get back per_proc
1883 lwz r1,spcFlags(r2) ; Load spcFlags
1884 rlwinm r1,r1,1+FamVMmodebit,30,31 ; Extract FamVMenabit and FamVMmodebit
1885 cmpi cr0,r1,2 ; Check FamVMena set without FamVMmode
1886 bne-- cr0,NoFamPf
1887 lwz r6,FAMintercept(r2) ; Load exceptions mask to intercept
1888 li r5,0 ; Clear
1889 srwi r1,r11,2 ; divide r11 by 4
1890 oris r5,r5,0x8000 ; Set r5 to 0x80000000
1891 srw r1,r5,r1 ; Set bit for current exception
1892 and. r1,r1,r6 ; And current exception with the intercept mask
1893 beq++ NoFamPf ; No, not a FAM intercept...
1894 bl EXT(vmm_fam_pf)
1895 b EatRupt
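;
; Illustrative sketch (comments only): the FAM intercept test above in C-like pseudocode.
; pp is the per_proc block; fam_active summarizes the spcFlags check above (FamVMena set
; without FamVMmode); exception codes are word multiples, so the divide picks one mask
; bit per exception type.
;
;   uint32_t bit = 0x80000000u >> (exc_code / 4);
;   if (fam_active && (pp->FAMintercept & bit)) {
;       vmm_fam_pf();                       // let the virtual machine monitor field it
;       EatRupt();
;   }
;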
1896
1897 NoFamPf: andi. r4,r0,lo16(MASK(MSR_RI)) ; See if the recover bit is on
1898 lis r0,0x8000 ; Get 0xFFFFFFFF80000000
1899 add r0,r0,r0 ; Get 0xFFFFFFFF00000000
1900 beq++ PassUpTrap ; Not on, normal case...
1901 ;
1902 ; Here is where we handle the "recovery mode" stuff.
1903 ; This is set by an emulation routine to trap any faults when it is fetching data or
1904 ; instructions.
1905 ;
1906 ; If we get a fault and RI is set, we set CR0_EQ to false, bump the PC, and set R0
1907 ; and R1 to the DAR and DSISR, respectively.
1908 ;
1909 lwz r3,savesrr0(r13) ; Get the failing instruction address
1910 lwz r4,savesrr0+4(r13) ; Get the failing instruction address
1911 lwz r5,savecr(r13) ; Get the condition register
1912 or r4,r4,r0 ; Fill the high part with foxes
1913 lwz r0,savedar(r13) ; Get the DAR
1914 addic r4,r4,4 ; Skip failing instruction
1915 lwz r6,savedar+4(r13) ; Get the DAR
1916 addze r3,r3 ; Propagate carry
1917 rlwinm r5,r5,0,3,1 ; Clear CR0_EQ to let emulation code know we failed
1918 lwz r7,savedsisr(r13) ; Grab the DSISR
1919 stw r3,savesrr0(r13) ; Save resume address
1920 stw r4,savesrr0+4(r13) ; Save resume address
1921 stw r5,savecr(r13) ; And the resume CR
1922 stw r0,saver0(r13) ; Pass back the DAR
1923 stw r6,saver0+4(r13) ; Pass back the DAR
1924 stw r7,saver1+4(r13) ; Pass back the DSISR
1925 b EatRupt ; Resume emulated code
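;
; Illustrative sketch (comments only): what the faulting emulation routine gets back
; when MSR[RI] was set, written over the savearea (sa) fields used above.
;
;   sa->srr0 += 4;              // resume past the faulting instruction
;   sa->cr  &= ~CR0_EQ;         // CR0_EQ = 0 tells the emulator the access failed
;   sa->r0   = sa->dar;         // faulting address
;   sa->r1   = sa->dsisr;       // fault status
;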
1926
1927 ;
1928 ; Here is where we handle the context switch firmware call. The old
1929 ; context has been saved. The new savearea is in kind of hokey, the high order
1930 ; half is stored in saver7 and the low half is in saver3. We will just
1931 ; muck around with the savearea pointers, and then join the exit routine
1932 ;
1933
1934 .align 5
1935
1936 conswtch:
1937 li r0,0xFFF ; Get page boundary
1938 mr r29,r13 ; Save the save
1939 andc r30,r13,r0 ; Round down to page boundary (64-bit safe)
1940 lwz r5,saver3+4(r13) ; Switch to the new savearea
1941 bf-- pf64Bitb,xcswNo64 ; Not 64-bit...
1942 lwz r6,saver7+4(r13) ; Get the high order half
1943 sldi r6,r6,32 ; Position high half
1944 or r5,r5,r6 ; Merge them
1945
1946 xcswNo64: lwz r30,SACvrswap+4(r30) ; get real to virtual translation
1947 mr r13,r5 ; Switch saveareas
1948 li r0,0 ; Clear this
1949 xor r27,r29,r30 ; Flip to virtual
1950 stw r0,saver3(r5) ; Push the new virtual savearea to the switch to routine
1951 stw r27,saver3+4(r5) ; Push the new virtual savearea to the switch to routine
1952 b EatRupt ; Start it up...
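;
; Illustrative sketch (comments only): assembling the new savearea address above, in
; C-like pseudocode. On 64-bit parts the high half rides in saver7; old_sa is the
; savearea we came in with and vrswap stands in for its block's SACvrswap constant.
;
;   savearea *new_sa = (savearea *)(sa->saver3_low |
;                        (is64bit ? (uint64_t)sa->saver7_low << 32 : 0));
;   new_sa->saver3 = (uintptr_t)old_sa ^ vrswap;   // hand the old one back, virtualized
;   r13 = new_sa;                                  // switch saveareas and exit
;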
1953
1954 ;
1955 ; Handle machine check here.
1956 ;
1957 ; ?
1958 ;
1959
1960 .align 5
1961
1962 MachineCheck:
1963
1964 bt++ pf64Bitb,mck64 ; ?
1965
1966 lwz r27,savesrr1+4(r13) ; Pick up srr1
1967
1968 ;
1969 ; Check if the failure was in
1970 ; ml_probe_read. If so, this is expected, so modify the PC to
1971 ; ml_probe_read_mck and then eat the exception.
1972 ;
1973 lwz r30,savesrr0+4(r13) ; Get the failing PC
1974 lis r28,hi16(EXT(ml_probe_read_mck)) ; High order part
1975 lis r27,hi16(EXT(ml_probe_read)) ; High order part
1976 ori r28,r28,lo16(EXT(ml_probe_read_mck)) ; Get the low part
1977 ori r27,r27,lo16(EXT(ml_probe_read)) ; Get the low part
1978 cmplw r30,r28 ; Check highest possible
1979 cmplw cr1,r30,r27 ; Check lowest
1980 bge- PassUpTrap ; Outside of range
1981 blt- cr1,PassUpTrap ; Outside of range
1982 ;
1983 ; We need to fix up the BATs here because the probe
1984 ; routine messed them all up... As long as we are at it,
1985 ; fix up to return directly to caller of probe.
1986 ;
1987
1988 lis r11,hi16(EXT(shadow_BAT)+shdDBAT) ; Get shadow address
1989 ori r11,r11,lo16(EXT(shadow_BAT)+shdDBAT) ; Get shadow address
1990
1991 lwz r30,0(r11) ; Pick up DBAT 0 high
1992 lwz r28,4(r11) ; Pick up DBAT 0 low
1993 lwz r27,8(r11) ; Pick up DBAT 1 high
1994 lwz r18,16(r11) ; Pick up DBAT 2 high
1995 lwz r11,24(r11) ; Pick up DBAT 3 high
1996
1997 sync
1998 mtdbatu 0,r30 ; Restore DBAT 0 high
1999 mtdbatl 0,r28 ; Restore DBAT 0 low
2000 mtdbatu 1,r27 ; Restore DBAT 1 high
2001 mtdbatu 2,r18 ; Restore DBAT 2 high
2002 mtdbatu 3,r11 ; Restore DBAT 3 high
2003 sync
2004
2005 lwz r28,savelr+4(r13) ; Get return point
2006 lwz r27,saver0+4(r13) ; Get the saved MSR
2007 li r30,0 ; Get a failure RC
2008 stw r28,savesrr0+4(r13) ; Set the return point
2009 stw r27,savesrr1+4(r13) ; Set the continued MSR
2010 stw r30,saver3+4(r13) ; Set return code
2011 b EatRupt ; Yum, yum, eat it all up...
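;
; Illustrative sketch (comments only): the ml_probe_read recovery above in C-like
; pseudocode. sa is the savearea, failing_pc is SRR0 at the machine check, and the
; DBAT restore is summarized by a hypothetical helper.
;
;   if (failing_pc >= (uintptr_t)ml_probe_read &&
;       failing_pc <  (uintptr_t)ml_probe_read_mck) {
;       restore_shadow_dbats();        // the probe trashed the DBATs
;       sa->srr0 = sa->lr;             // return straight to ml_probe_read's caller
;       sa->srr1 = sa->r0;             // the MSR the probe stashed in R0
;       sa->r3   = 0;                  // failure return code
;       EatRupt();
;   } else {
;       PassUpTrap();                  // a real machine check
;   }
;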
2012
2013 ;
2014 ; 64-bit machine checks
2015 ;
2016
2017 mck64:
2018
2019 ;
2020 ; NOTE: WE NEED TO RETHINK RECOVERABILITY A BIT - radar 3167190
2021 ;
2022
2023 ld r23,savesrr0(r13) ; Grab the SRR0 in case we need bad instruction
2024 ld r20,savesrr1(r13) ; Grab the SRR1 so we can decode the thing
2025 lwz r21,savedsisr(r13) ; We might need this in a bit
2026 ld r22,savedar(r13) ; We might need this in a bit
2027
2028 lis r8,AsyMCKSrc ; Get the Async MCK Source register address
2029 mfsprg r19,2 ; Get the feature flags
2030 ori r8,r8,0x8000 ; Set to read data
2031 rlwinm. r0,r19,0,pfSCOMFixUpb,pfSCOMFixUpb ; Do we need to fix the SCOM data?
2032
2033 sync
2034
2035 mtspr scomc,r8 ; Request the MCK source
2036 mfspr r24,scomd ; Get the source
2037 mfspr r8,scomc ; Get back the status (we just ignore it)
2038 sync
2039 isync
2040
2041 lis r8,AsyMCKRSrc ; Get the Async MCK Source AND mask address
2042 li r9,0 ; Get an AND mask of 0
2043
2044 sync
2045
2046 mtspr scomd,r9 ; Set the AND mask to 0
2047 mtspr scomc,r8 ; Write the AND mask and clear conditions
2048 mfspr r8,scomc ; Get back the status (we just ignore it)
2049 sync
2050 isync
2051
2052 lis r8,cFIR ; Get the Core FIR register address
2053 ori r8,r8,0x8000 ; Set to read data
2054
2055 sync
2056
2057 mtspr scomc,r8 ; Request the Core FIR
2058 mfspr r25,scomd ; Get the source
2059 mfspr r8,scomc ; Get back the status (we just ignore it)
2060 sync
2061 isync
2062
2063 lis r8,cFIRrst ; Get the Core FIR AND mask address
2064
2065 sync
2066
2067 mtspr scomd,r9 ; Set the AND mask to 0
2068 mtspr scomc,r8 ; Write the AND mask and clear conditions
2069 mfspr r8,scomc ; Get back the status (we just ignore it)
2070 sync
2071 isync
2072
2073 ; Note: bug in early chips where scom reads are shifted right by 1. We fix that here.
2074 ; Also note that we will lose bit 63
2075
2076 beq++ mckNoFix ; No fix up is needed
2077 sldi r24,r24,1 ; Shift left 1
2078 sldi r25,r25,1 ; Shift left 1
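;
; Illustrative sketch (comments only): the early-chip SCOM workaround above.
;
;   if (pf_scom_fixup) {        // pfSCOMFixUp feature bit tested before the reads
;       mck_src  <<= 1;         // undo the right-shift-by-1; bit 63 is lost
;       core_fir <<= 1;
;   }
;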
2079
2080 mckNoFix: std r24,savemisc0(r13) ; Save the MCK source in case we pass the error
2081 std r25,savemisc1(r13) ; Save the Core FIR in case we pass the error
2082
2083 rlwinm. r0,r20,0,mckIFUE-32,mckIFUE-32 ; Is this some kind of uncorrectable?
2084 bne mckUE ; Yeah...
2085
2086 rlwinm. r0,r20,0,mckLDST-32,mckLDST-32 ; Some kind of load/store error?
2087 bne mckHandleLDST ; Yes...
2088
2089 rldicl. r0,r20,46,62 ; Get the error cause code
2090 beq mckNotSure ; We need some more checks for this one...
2091
2092 cmplwi r0,2 ; Check for TLB parity error
2093 blt mckSLBparity ; This is an SLB parity error...
2094 bgt mckhIFUE ; This is an IFetch tablewalk reload UE...
2095
2096 ; IFetch TLB parity error
2097
2098 isync
2099 tlbiel r23 ; Locally invalidate TLB entry for iaddr
2100 sync ; Wait for it
2101 b EatRupt ; All recovered...
2102
2103 ; SLB parity error. This could be software caused. We get one if there is
2104 ; more than 1 valid SLBE with a matching ESID. That one we do not want to
2105 ; try to recover from. Search for it and if we get it, panic.
2106
2107 mckSLBparity:
2108 crclr cr0_eq ; Make sure we are not equal so we take correct exit
2109
2110 la r3,emvr0(r2) ; Use this to keep track of valid ESIDs we find
2111 li r5,0 ; Start with index 0
2112
2113 mckSLBck: la r4,emvr0(r2) ; Use this to keep track of valid ESIDs we find
2114 slbmfee r6,r5 ; Get the next SLBE
2115 andis. r0,r6,0x0800 ; See if valid bit is on
2116 beq mckSLBnx ; Skip invalid and go to next
2117
2118 mckSLBck2: cmpld r4,r3 ; Have we reached the end of the table?
2119 beq mckSLBne ; Yes, go enter this one...
2120 ld r7,0(r4) ; Pick up the saved ESID
2121 cmpld r6,r7 ; Is this a match?
2122 beq mckSLBrec ; Whoops, I did bad, recover and pass up...
2123 addi r4,r4,8 ; Next table entry
2124 b mckSLBck2 ; Check the next...
2125
2126 mckSLBnx: addi r5,r5,1 ; Point to next SLBE
2127 cmplwi r5,64 ; Have we checked all of them?
2128 bne++ mckSLBck ; Not yet, check again...
2129 b mckSLBrec ; We looked at them all, go recover...
2130
2131 mckSLBne: std r6,0(r3) ; Save this ESID
2132 addi r3,r3,8 ; Point to the new slot
2133 b mckSLBnx ; Go do the next SLBE...
2134
2135 ; Recover an SLB error
2136
2137 mckSLBrec: li r0,0 ; Set an SLB slot index of 0
2138 slbia ; Trash all SLB entries (except for entry 0 that is)
2139 slbmfee r7,r0 ; Get the entry that is in SLB index 0
2140 rldicr r7,r7,0,35 ; Clear the valid bit and the rest
2141 slbie r7 ; Invalidate it
2142
2143 li r3,0 ; Set the first SLBE
2144
2145 mckSLBclr: slbmte r0,r3 ; Clear the whole entry to 0s
2146 addi r3,r3,1 ; Bump index
2147 cmplwi cr1,r3,64 ; Have we done them all?
2148 bne++ cr1,mckSLBclr ; Not yet, clear the next one...
2149
2150 sth r3,ppInvSeg(r2) ; Store non-zero to trigger SLB reload
2151 bne++ EatRupt ; This was not a programming error, all recovered...
2152 b PassUpTrap ; Pass the software error up...
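;
; Illustrative sketch (comments only): the duplicate-ESID scan and recovery above, in
; C-like pseudocode. pp is the per_proc block, seen[] stands in for the emvr0 scratch
; area, and 64 is the SLB size this code assumes.
;
;   int n = 0, software_caused = 0;
;   for (int i = 0; i < 64; i++) {
;       uint64_t esid = slbmfee(i);
;       if (!(esid & SLB_VALID)) continue;               // skip invalid entries
;       for (int j = 0; j < n; j++)
;           if (seen[j] == esid) { software_caused = 1; goto recover; }
;       seen[n++] = esid;
;   }
; recover:
;   flush_and_clear_slb();                               // slbia + zero all 64 slots
;   pp->ppInvSeg = 1;                                    // any non-zero forces an SLB reload
;   if (software_caused) PassUpTrap(); else EatRupt();
;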
2153
2154 ;
2155 ; Handle a load/store unit error. We need to decode the DSISR
2156 ;
2157
2158 mckHandleLDST:
2159 rlwinm. r0,r21,0,mckL1DCPE,mckL1DCPE ; An L1 data cache parity error?
2160 bne++ mckL1D ; Yeah, we dealt with this back in the vector...
2161
2162 rlwinm. r0,r21,0,mckL1DTPE,mckL1DTPE ; An L1 tag error?
2163 bne++ mckL1T ; Yeah, we dealt with this back in the vector...
2164
2165 rlwinm. r0,r21,0,mckUEdfr,mckUEdfr ; Is this a "deferred" UE?
2166 bne mckDUE ; Yeah, go see if expected...
2167
2168 rlwinm. r0,r21,0,mckUETwDfr,mckUETwDfr ; Is this a "deferred" tablewalk UE?
2169 bne mckDTW ; Yeah, no recovery...
2170
2171 rlwinm. r0,r21,0,mckSLBPE,mckSLBPE ; SLB parity error?
2172 bne mckSLBparity ; Yeah, go attempt recovery....
2173
2174 ; This is a recoverable D-ERAT or TLB error
2175
2176 la r9,hwMckERCPE(r2) ; Get DERAT parity error count
2177
2178 mckInvDAR: isync
2179 tlbiel r22 ; Locally invalidate the TLB entry
2180 sync
2181
2182 lwz r21,0(r9) ; Get count
2183 addi r21,r21,1 ; Count this one
2184 stw r21,0(r9) ; Stick it back
2185
2186 b EatRupt ; All recovered...
2187
2188 ;
2189 ; When we come here, we are not quite sure what the error is. We need to
2190 ; dig a bit further.
2191 ;
2192 ; R24 is interrupt source
2193 ; R25 is Core FIR
2194 ;
2195 ; Note that both have been cleared already.
2196 ;
2197
2198 mckNotSure:
2199 rldicl. r0,r24,AsyMCKfir+1,63 ; Something in the FIR?
2200 bne-- mckFIR ; Yup, go check some more...
2201
2202 rldicl. r0,r24,AsyMCKhri+1,63 ; Hang recovery?
2203 bne-- mckHangRcvr ; Yup...
2204
2205 rldicl. r0,r24,AsyMCKext+1,63 ; External signal?
2206 bne-- mckExtMck ; Yup...
2207
2208 ;
2209 ; We really do not know what this one is or what to do with it...
2210 ;
2211
2212 mckUnk: lwz r21,hwMckUnk(r2) ; Get unknown error count
2213 addi r21,r21,1 ; Count it
2214 stw r21,hwMckUnk(r2) ; Stuff it
2215 b PassUpTrap ; Go south, young man...
2216
2217 ;
2218 ; Hang recovery. This is just a notification so we only count.
2219 ;
2220
2221 mckHangRcrvr:
2222 lwz r21,hwMckHang(r2) ; Get hang recovery count
2223 addi r21,r21,1 ; Count this one
2224 stw r21,hwMckHang(r2) ; Stick it back
2225 b EatRupt ; All recovered...
2226
2227 ;
2228 ; Externally signaled MCK. No recovery for the moment, but this may be
2229 ; where we handle ml_probe_read problems eventually.
2230 ;
2231 mckExtMck:
2232 lwz r21,hwMckHang(r2) ; Get hang recovery count
2233 addi r21,r21,1 ; Count this one
2234 stw r21,hwMckHang(r2) ; Stick it back
2235 b EatRupt ; All recovered...
2236
2237 ;
2238 ; Machine check cause is in a FIR. Suss it out here.
2239 ; Core FIR is in R25 and has been cleared in HW.
2240 ;
2241
2242 mckFIR: rldicl. r0,r25,cFIRICachePE+1,63 ; I-Cache parity error?
2243 la r19,hwMckICachePE(r2) ; Point to counter
2244 bne mckInvICache ; Go invalidate I-Cache...
2245
2246 rldicl. r0,r25,cFIRITagPE0+1,63 ; I-Cache tag parity error?
2247 la r19,hwMckITagPE(r2) ; Point to counter
2248 bne mckInvICache ; Go invalidate I-Cache...
2249
2250 rldicl. r0,r25,cFIRITagPE1+1,63 ; I-Cache tag parity error?
2251 la r19,hwMckITagPE(r2) ; Point to counter
2252 bne mckInvICache ; Go invalidate I-Cache...
2253
2254 rldicl. r0,r25,cFIRIEratPE+1,63 ; IERAT parity error?
2255 la r19,hwMckIEratPE(r2) ; Point to counter
2256 bne mckInvERAT ; Go invalidate ERATs...
2257
2258 rldicl. r0,r25,cFIRIFUL2UE+1,63 ; IFetch got L2 UE?
2259 bne mckhIFUE ; Go count and pass up...
2260
2261 rldicl. r0,r25,cFIRDCachePE+1,63 ; D-Cache PE?
2262 bne mckL1D ; Handled, just go count...
2263
2264 rldicl. r0,r25,cFIRDTagPE+1,63 ; D-Cache tag PE?
2265 bne mckL1T ; Handled, just go count...
2266
2267 rldicl. r0,r25,cFIRDEratPE+1,63 ; DERAT PE?
2268 la r19,hwMckDEratPE(r2) ; Point to counter
2269 bne mckInvERAT ; Go invalidate ERATs...
2270
2271 rldicl. r0,r25,cFIRTLBPE+1,63 ; TLB PE?
2272 la r9,hwMckTLBPE(r2) ; Get TLB parity error count
2273 bne mckInvDAR ; Go recover...
2274
2275 rldicl. r0,r25,cFIRSLBPE+1,63 ; SLB PE?
2276 bne mckSLBparity ; Cope with it...
2277
2278 b mckUnk ; Have not a clue...
2279
2280 ;
2281 ; General recovery for I-Cache errors. Just flush it completely.
2282 ;
2283
2284 .align 7 ; Force into cache line
2285
2286 mckInvICache:
2287 lis r0,0x0080 ; Get a 0x0080 (bit 9 >> 32)
2288 mfspr r21,hid1 ; Get the current HID1
2289 sldi r0,r0,32 ; Get the "forced ICBI match" bit
2290 or r0,r0,r21 ; Set forced match
2291
2292 isync
2293 mtspr hid1,r0 ; Stick it
2294 mtspr hid1,r0 ; Stick it again
2295 isync
2296
2297 li r6,0 ; Start at 0
2298
2299 mckIcbi: icbi 0,r6 ; Kill I$
2300 addi r6,r6,128 ; Next line
2301 andis. r5,r6,1 ; Have we done them all?
2302 beq++ mckIcbi ; Not yet...
2303
2304 isync
2305 mtspr hid1,r21 ; Restore original HID1
2306 mtspr hid1,r21 ; Stick it again
2307 isync
2308
2309 lwz r5,0(r19) ; Get the counter
2310 addi r5,r5,1 ; Count it
2311 stw r5,0(r19) ; Stuff it back
2312 b EatRupt ; All recovered...
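;
; Illustrative sketch (comments only): the flush loop above in C-like pseudocode. With
; the "forced ICBI match" HID1 bit set, every icbi invalidates its line regardless of
; tag, so one 64KB sweep of 128-byte lines clears the whole I-cache.
;
;   hid1_set(saved_hid1 | FORCE_ICBI_MATCH);
;   for (uint32_t addr = 0; addr < 64 * 1024; addr += 128)
;       icbi(addr);
;   hid1_set(saved_hid1);
;   (*counter)++;                       // hwMckICachePE, hwMckITagPE, etc.
;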
2313
2314
2315 ; General recovery for ERAT problems - handled in exception vector already
2316
2317 mckInvERAT: lwz r21,0(r19) ; Get the exception count spot
2318 addi r21,r21,1 ; Count this one
2319 stw r21,0(r19) ; Save count
2320 b EatRupt ; All recovered...
2321
2322 ; General hang recovery - this is a notification only, just count.
2323
2324 mckHangRcvr:
2325 lwz r21,hwMckHang(r2) ; Get hang recovery count
2326 addi r21,r21,1 ; Count this one
2327 stw r21,hwMckHang(r2) ; Stick it back
2328 b EatRupt ; All recovered...
2329
2330
2331 ;
2332 ; These are the uncorrectable errors, just count them then pass it along.
2333 ;
2334
2335 mckUE: lwz r21,hwMckUE(r2) ; Get general uncorrectable error count
2336 addi r21,r21,1 ; Count it
2337 stw r21,hwMckUE(r2) ; Stuff it
2338 b PassUpTrap ; Go south, young man...
2339
2340 mckhIFUE: lwz r21,hwMckIUEr(r2) ; Get I-Fetch TLB reload uncorrectable error count
2341 addi r21,r21,1 ; Count it
2342 stw r21,hwMckIUEr(r2) ; Stuff it
2343 b PassUpTrap ; Go south, young man...
2344
2345 mckDUE: lwz r21,hwMckDUE(r2) ; Get deferred uncorrectable error count
2346 addi r21,r21,1 ; Count it
2347 stw r21,hwMckDUE(r2) ; Stuff it
2348
2349 ;
2350 ; Right here is where we end up after a failure on a ml_probe_read_64.
2351 ; We will check if that is the case, and if so, fix everything up and
2352 ; return from it.
2353
2354 lis r8,hi16(EXT(ml_probe_read_64)) ; High of start
2355 lis r9,hi16(EXT(ml_probe_read_mck_64)) ; High of end
2356 ori r8,r8,lo16(EXT(ml_probe_read_64)) ; Low of start
2357 ori r9,r9,lo16(EXT(ml_probe_read_mck_64)) ; Low of end
2358 cmpld r23,r8 ; Too soon?
2359 cmpld cr1,r23,r9 ; Too late?
2360
2361 cror cr0_lt,cr0_lt,cr1_gt ; Too soon or too late?
2362 ld r3,saver12(r13) ; Get the original MSR
2363 ld r5,savelr(r13) ; Get the return address
2364 li r4,0 ; Get fail code
2365 blt-- PassUpTrap ; This is a normal machine check, just pass up...
2366 std r5,savesrr0(r13) ; Set the return address
2367
2368 std r3,savesrr1(r13) ; Set the return MSR
2369 std r4,saver3(r13) ; Set failure return code
2370 b EatRupt ; Go return from ml_probe_read_64...
2371
2372 mckDTW: lwz r21,hwMckDTW(r2) ; Get deferred tablewalk uncorrectable error count
2373 addi r21,r21,1 ; Count it
2374 stw r21,hwMckDTW(r2) ; Stuff it
2375 b PassUpTrap ; Go south, young man...
2376
2377 mckL1D: lwz r21,hwMckL1DPE(r2) ; Get data cache parity error count
2378 addi r21,r21,1 ; Count it
2379 stw r21,hwMckL1DPE(r2) ; Stuff it
2380 b PassUpTrap ; Go south, young man...
2381
2382 mckL1T: lwz r21,hwMckL1TPE(r2) ; Get TLB parity error count
2383 addi r21,r21,1 ; Count it
2384 stw r21,hwMckL1TPE(r2) ; Stuff it
2385 b PassUpTrap ; Go south, young man...
2386
2387
2388 /*
2389 * Here's where we come back from some instruction emulator. If we come back with
2390 * T_IN_VAIN, the emulation is done and we should just reload state and directly
2391 * go back to the interrupted code. Otherwise, we'll check to see if
2392 * we need to redrive with a different interrupt, i.e., DSI.
2393 * Note that we are actually not redriving the rupt, rather changing it
2394 * into a different one. Thus we clear the redrive bit.
2395 */
2396
2397 .align 5
2398 .globl EXT(EmulExit)
2399
2400 LEXT(EmulExit)
2401
2402 cmplwi cr1,r11,T_IN_VAIN ; Was it emulated?
2403 lis r1,hi16(SAVredrive) ; Get redrive request
2404 beq++ cr1,EatRupt ; Yeah, just blast back to the user...
2405 lwz r4,SAVflags(r13) ; Pick up the flags
2406
2407 and. r0,r4,r1 ; Check if redrive requested
2408
2409 beq++ PassUpTrap ; No redrive, just keep on going...
2410
2411 b Redrive ; Redrive the exception...
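;
; Illustrative sketch (comments only): the decision above in C-like pseudocode
; (sa is the savearea in R13).
;
;   if (exc_code == T_IN_VAIN)           EatRupt();      // emulation finished
;   else if (sa->SAVflags & SAVredrive)  Redrive();      // re-filter as the new rupt
;   else                                 PassUpTrap();
;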
2412
2413 ;
2414 ; Jump into main handler code switching on VM at the same time.
2415 ;
2416 ; We assume kernel data is mapped contiguously in physical
2417 ; memory, otherwise we would need to switch on (at least) virtual data.
2418 ; SRs are already set up.
2419 ;
2420
2421 .align 5
2422
2423 PassUpTrap: lis r20,hi16(EXT(thandler)) ; Get thandler address
2424 ori r20,r20,lo16(EXT(thandler)) ; Get thandler address
2425 b PassUp ; Go pass it up...
2426
2427 PassUpRupt: lis r20,hi16(EXT(ihandler)) ; Get ihandler address
2428 ori r20,r20,lo16(EXT(ihandler)) ; Get ihandler address
2429 b PassUp ; Go pass it up...
2430
2431 .align 5
2432
2433 PassUpFPU: lis r20,hi16(EXT(fpu_switch)) ; Get FPU switcher address
2434 ori r20,r20,lo16(EXT(fpu_switch)) ; Get FPU switcher address
2435 b PassUp ; Go pass it up...
2436
2437 PassUpVMX: lis r20,hi16(EXT(vec_switch)) ; Get VMX switcher address
2438 ori r20,r20,lo16(EXT(vec_switch)) ; Get VMX switcher address
2439 bt++ featAltivec,PassUp ; We have VMX on this CPU...
2440 li r11,T_PROGRAM ; Say that it is a program exception
2441 li r20,8 ; Set invalid instruction
2442 stw r11,saveexception(r13) ; Set the new exception code
2443 sth r20,savesrr1+4(r13) ; Set the invalid instruction SRR code
2444
2445 b PassUpTrap ; Go pass it up...
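;
; Illustrative sketch (comments only): the fallback above when the CPU has no Altivec.
; cpu_has_altivec and mark_illegal_instruction are hypothetical names; the latter stands
; for the sth above that drops the invalid-instruction code into the saved SRR1.
;
;   if (!cpu_has_altivec) {
;       sa->exception = T_PROGRAM;      // rewrite the rupt as a program exception
;       mark_illegal_instruction(sa);
;       PassUpTrap();
;   }
;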
2446
2447 .align 5
2448
2449 PassUpAbend:
2450 lis r20,hi16(EXT(chandler)) ; Get choke handler address
2451 ori r20,r20,lo16(EXT(chandler)) ; Get choke handler address
2452 b PassUp ; Go pass it up...
2453
2454 .align 5
2455
2456 PassUp:
2457 #if INSTRUMENT
2458 mfspr r29,pmc1 ; INSTRUMENT - saveinstr[11] - Take stamp at passup or eatrupt
2459 stw r29,0x6100+(11*16)+0x0(0) ; INSTRUMENT - Save it
2460 mfspr r29,pmc2 ; INSTRUMENT - Get stamp
2461 stw r29,0x6100+(11*16)+0x4(0) ; INSTRUMENT - Save it
2462 mfspr r29,pmc3 ; INSTRUMENT - Get stamp
2463 stw r29,0x6100+(11*16)+0x8(0) ; INSTRUMENT - Save it
2464 mfspr r29,pmc4 ; INSTRUMENT - Get stamp
2465 stw r29,0x6100+(11*16)+0xC(0) ; INSTRUMENT - Save it
2466 #endif
2467
2468 lwz r10,SAVflags(r13) ; Pick up the flags
2469
2470 li r0,0xFFF ; Get a page mask
2471 li r2,MASK(MSR_BE)|MASK(MSR_SE) ; Get the mask to save trace bits
2472 andc r5,r13,r0 ; Back off to the start of savearea block
2473 mfmsr r3 ; Get our MSR
2474 rlwinm r10,r10,0,SAVredriveb+1,SAVredriveb-1 ; Clear the redrive before we pass it up
2475 li r21,MSR_SUPERVISOR_INT_OFF ; Get our normal MSR value
2476 and r3,r3,r2 ; Clear all but trace
2477 lwz r5,SACvrswap+4(r5) ; Get real to virtual conversion
2478 or r21,r21,r3 ; Keep the trace bits if they are on
2479 stw r10,SAVflags(r13) ; Set the flags with the cleared redrive flag
2480 mr r3,r11 ; Pass the exception code in the parameter reg
2481 xor r4,r13,r5 ; Pass up the virtual address of context savearea
2482 mfsprg r29,0 ; Get the per_proc block back
2483 rlwinm r4,r4,0,0,31 ; Clean top half of virtual savearea if 64-bit
2484
2485 mr r3,r21 ; Pass in the MSR we will go to
2486 bl EXT(switchSegs) ; Go handle the segment registers/STB
2487
2488 #if INSTRUMENT
2489 mfspr r30,pmc1 ; INSTRUMENT - saveinstr[7] - Take stamp after switchsegs
2490 stw r30,0x6100+(7*16)+0x0(0) ; INSTRUMENT - Save it
2491 mfspr r30,pmc2 ; INSTRUMENT - Get stamp
2492 stw r30,0x6100+(7*16)+0x4(0) ; INSTRUMENT - Save it
2493 mfspr r30,pmc3 ; INSTRUMENT - Get stamp
2494 stw r30,0x6100+(7*16)+0x8(0) ; INSTRUMENT - Save it
2495 mfspr r30,pmc4 ; INSTRUMENT - Get stamp
2496 stw r30,0x6100+(7*16)+0xC(0) ; INSTRUMENT - Save it
2497 #endif
2498 lwz r3,saveexception(r13) ; Recall the exception code
2499
2500 mtsrr0 r20 ; Set up the handler address
2501 mtsrr1 r21 ; Set up our normal MSR value
2502
2503 bt++ pf64Bitb,puLaunch ; Handle 64-bit machine...
2504
2505 rfi ; Launch the exception handler
2506
2507 puLaunch: rfid ; Launch the exception handler
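;
; Illustrative sketch (comments only): what PassUp hands to thandler/ihandler/etc., in
; C-like pseudocode. new_msr, handler, and vrswap are hypothetical stand-ins for R21,
; R20, and the SACvrswap constant of the savearea's block.
;
;   new_msr = MSR_SUPERVISOR_INT_OFF | (cur_msr & (MSR_BE | MSR_SE)); // keep trace bits
;   sa->SAVflags &= ~SAVredrive;            // no longer a redrive
;   r3 = sa->exception;                     // exception code
;   r4 = (uintptr_t)sa ^ vrswap;            // virtual address of the savearea
;   switchSegs(new_msr);                    // segments/STAB for the target context
;   srr0 = handler;  srr1 = new_msr;
;   rfi_or_rfid();                          // launch the handler
;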
2508
2509 /*
2510 * This routine is the main place where we return from an interruption.
2511 *
2512 * This is also where we release the quickfret list. These are saveareas
2513 * that were released as part of the exception exit path in hw_exceptions.
2514 * In order to save an atomic operation (which actually will not work
2515 * properly on a 64-bit machine) we use holdQFret to indicate that the list
2516 * is in flux and should not be looked at here. This comes into play only
2517 * when we take a PTE miss when we are queuing a savearea onto qfret.
2518 * Quite rare but could happen. If the flag is set, this code does not
2519 * release the list and waits until next time.
2520 *
2521 * All we need to remember here is that R13 must point to the savearea
2522 * that has the context we need to load up. Translation and interruptions
2523 * must be disabled.
2524 *
2525 * This code always loads the context in the savearea pointed to
2526 * by R13. In the process, it throws away the savearea. If there
2527 * is any tomfoolery with savearea stacks, it must be taken care of
2528 * before we get here.
2529 *
2530 */
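;
; Illustrative sketch (comments only): the quickfret drain described above, in C-like
; pseudocode with field names mirroring holdQFret/quickfret/SAVprev (pp is per_proc).
;
;   if (!pp->holdQFret) {                  // list is stable, safe to drain
;       while ((sa = pp->quickfret) != NULL) {
;           next = sa->SAVprev;
;           save_ret_phys(sa);             // back onto the free list
;           pp->quickfret = next;          // ok to dequeue after the release
;       }
;   }
;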
2531
2532 .align 5
2533
2534 EatRupt: mfsprg r29,0 ; Get the per_proc block back
2535 mr r31,r13 ; Move the savearea pointer to the far end of the register set
2536 mfsprg r27,2 ; Get the processor features
2537
2538 lwz r3,holdQFret(r29) ; Get the release hold off flag
2539
2540 bt++ pf64Bitb,eat64a ; Skip down to the 64-bit version of this
2541
2542 ;
2543 ; This starts the 32-bit version
2544 ;
2545
2546 mr. r3,r3 ; Should we hold off the quick release?
2547 lwz r30,quickfret+4(r29) ; Pick up the quick fret list, if any
2548 la r21,saver0(r31) ; Point to the first thing we restore
2549 bne- ernoqfret ; Hold off set, do not release just now...
2550
2551 erchkfret: mr. r3,r30 ; Any savearea to quickly release?
2552 beq+ ernoqfret ; No quickfrets...
2553 lwz r30,SAVprev+4(r30) ; Chain back now
2554
2555 bl EXT(save_ret_phys) ; Put it on the free list
2556 stw r30,quickfret+4(r29) ; Dequeue previous guy (really, it is ok to wait until after the release)
2557 b erchkfret ; Try the next one...
2558
2559 .align 5
2560
2561 ernoqfret:
2562 #if INSTRUMENT
2563 mfspr r30,pmc1 ; INSTRUMENT - saveinstr[5] - Take stamp at saveareas released
2564 stw r30,0x6100+(5*16)+0x0(0) ; INSTRUMENT - Save it
2565 mfspr r30,pmc2 ; INSTRUMENT - Get stamp
2566 stw r30,0x6100+(5*16)+0x4(0) ; INSTRUMENT - Save it
2567 mfspr r30,pmc3 ; INSTRUMENT - Get stamp
2568 stw r30,0x6100+(5*16)+0x8(0) ; INSTRUMENT - Save it
2569 mfspr r30,pmc4 ; INSTRUMENT - Get stamp
2570 stw r30,0x6100+(5*16)+0xC(0) ; INSTRUMENT - Save it
2571 #endif
2572
2573 dcbt 0,r21 ; Touch in the first thing we need
2574
2575 ;
2576 ; Here we release the savearea.
2577 ;
2578 ; Important!!!! The savearea is released before we are done with it. When the
2579 ; local free savearea list (anchored at lclfree) gets too long, save_ret_phys
2580 ; will trim the list, making the extra saveareas allocatable by another processor.
2581 ; The code in there must ALWAYS leave our savearea on the local list, otherwise
2582 ; we could be very, very unhappy. The code there always queues the "just released"
2583 ; savearea to the head of the local list. Then, if it needs to trim, it will
2584 ; start with the SECOND savearea, leaving ours intact.
2585 ;
2586 ;
2587
2588 mr r3,r31 ; Get the exiting savearea in parm register
2589 bl EXT(save_ret_phys) ; Put it on the free list
2590 #if INSTRUMENT
2591 mfspr r3,pmc1 ; INSTRUMENT - saveinstr[6] - Take stamp after savearea released
2592 stw r3,0x6100+(6*16)+0x0(0) ; INSTRUMENT - Save it
2593 mfspr r3,pmc2 ; INSTRUMENT - Get stamp
2594 stw r3,0x6100+(6*16)+0x4(0) ; INSTRUMENT - Save it
2595 mfspr r3,pmc3 ; INSTRUMENT - Get stamp
2596 stw r3,0x6100+(6*16)+0x8(0) ; INSTRUMENT - Save it
2597 mfspr r3,pmc4 ; INSTRUMENT - Get stamp
2598 stw r3,0x6100+(6*16)+0xC(0) ; INSTRUMENT - Save it
2599 #endif
2600
2601 lwz r3,savesrr1+4(r31) ; Pass in the MSR we are going to
2602 bl EXT(switchSegs) ; Go handle the segment registers/STB
2603 #if INSTRUMENT
2604 mfspr r30,pmc1 ; INSTRUMENT - saveinstr[10] - Take stamp after switchsegs
2605 stw r30,0x6100+(10*16)+0x0(0) ; INSTRUMENT - Save it
2606 mfspr r30,pmc2 ; INSTRUMENT - Get stamp
2607 stw r30,0x6100+(10*16)+0x4(0) ; INSTRUMENT - Save it
2608 mfspr r30,pmc3 ; INSTRUMENT - Get stamp
2609 stw r30,0x6100+(10*16)+0x8(0) ; INSTRUMENT - Save it
2610 mfspr r30,pmc4 ; INSTRUMENT - Get stamp
2611 stw r30,0x6100+(10*16)+0xC(0) ; INSTRUMENT - Save it
2612 #endif
2613 li r3,savesrr1+4 ; Get offset to the srr1 value
2614
2615 lhz r9,PP_CPU_FLAGS(r29) ; Get the processor flags
2616 lwarx r26,r3,r31 ; Get destination MSR and take reservation along the way (just so we can blow it away)
2617
2618 rlwinm r25,r26,27,22,22 ; Move PR bit to BE
2619
2620 cmplw cr3,r14,r14 ; Set that we do not need to stop streams
2621
2622 rlwinm r9,r9,(((31-MSR_BE_BIT)+(traceBEb+16+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Set BE bit if special trace is on
2623 li r21,emfp0 ; Point to the fp savearea
2624 and r9,r9,r25 ; Clear BE if supervisor state
2625 or r26,r26,r9 ; Flip on the BE bit for special trace if needed
2626 stwcx. r26,r3,r31 ; Blow away any reservations we hold (and set BE)
2627
2628 lwz r25,savesrr0+4(r31) ; Get the SRR0 to use
2629
2630 la r28,saver4(r31) ; Point to the 32-byte line with r4-r7
2631 dcbz r21,r29 ; Clear a work area
2632 lwz r0,saver0+4(r31) ; Restore R0
2633 dcbt 0,r28 ; Touch in r4-r7
2634 lwz r1,saver1+4(r31) ; Restore R1
2635 lwz r2,saver2+4(r31) ; Restore R2
2636 la r28,saver8(r31) ; Point to the 32-byte line with r8-r11
2637 lwz r3,saver3+4(r31) ; Restore R3
2638 andis. r6,r27,hi16(pfAltivec) ; Do we have altivec on the machine?
2639 dcbt 0,r28 ; touch in r8-r11
2640 lwz r4,saver4+4(r31) ; Restore R4
2641 la r28,saver12(r31) ; Point to the 32-byte line with r12-r15
2642 mtsrr0 r25 ; Restore the SRR0 now
2643 lwz r5,saver5+4(r31) ; Restore R5
2644 mtsrr1 r26 ; Restore the SRR1 now
2645 lwz r6,saver6+4(r31) ; Restore R6
2646
2647 dcbt 0,r28 ; touch in r12-r15
2648 la r28,saver16(r31)
2649
2650 lwz r7,saver7+4(r31) ; Restore R7
2651 lwz r8,saver8+4(r31) ; Restore R8
2652 lwz r9,saver9+4(r31) ; Restore R9
2653
2654 dcbt 0,r28 ; touch in r16-r19
2655 la r28,saver20(r31)
2656
2657 lwz r10,saver10+4(r31) ; Restore R10
2658 lwz r11,saver11+4(r31) ; Restore R11
2659
2660 dcbt 0,r28 ; touch in r20-r23
2661 la r28,savevscr(r31) ; Point to the status area
2662
2663 lwz r12,saver12+4(r31) ; Restore R12
2664 lwz r13,saver13+4(r31) ; Restore R13
2665
2666 la r14,savectr+4(r31)
2667 dcbt 0,r28 ; Touch in VSCR and FPSCR
2668 dcbt 0,r14 ; touch in CTR, DAR, DSISR, VRSAVE, and Exception code
2669
2670 lwz r26,next_savearea+4(r29) ; Get the exception save area
2671 la r28,saver24(r31)
2672
2673 lwz r14,saver14+4(r31) ; Restore R14
2674 lwz r15,saver15+4(r31) ; Restore R15
2675
2676
2677 stfd f0,emfp0(r29) ; Save FP0
2678 lwz r27,savevrsave(r31) ; Get the vrsave
2679 dcbt 0,r28 ; touch in r24-r27
2680 la r28,savevscr(r31) ; Point to the status area
2681 lfd f0,savefpscrpad(r31) ; Get the fpscr
2682 la r22,saver28(r31)
2683 mtfsf 0xFF,f0 ; Restore fpscr
2684 lfd f0,emfp0(r29) ; Restore the used register
2685
2686 beq noavec3 ; No Altivec on this CPU...
2687
2688 stvxl v0,r21,r29 ; Save a vector register
2689 lvxl v0,0,r28 ; Get the vector status
2690 mtspr vrsave,r27 ; Set the vrsave
2691 mtvscr v0 ; Set the vector status
2692 lvxl v0,r21,r29 ; Restore work vector register
2693
2694 noavec3: dcbt 0,r22 ; touch in r28-r31
2695
2696 lwz r23,spcFlags(r29) ; Get the special flags from per_proc
2697 la r17,savesrr0(r31)
2698 la r26,saver0(r26) ; Point to the first part of the next savearea
2699 dcbt 0,r17 ; touch in SRR0, SRR1, CR, XER, LR
2700 lhz r28,pfrptdProc(r29) ; Get the reported processor type
2701
2702 lwz r16,saver16+4(r31) ; Restore R16
2703 lwz r17,saver17+4(r31) ; Restore R17
2704 lwz r18,saver18+4(r31) ; Restore R18
2705 lwz r19,saver19+4(r31) ; Restore R19
2706 lwz r20,saver20+4(r31) ; Restore R20
2707 lwz r21,saver21+4(r31) ; Restore R21
2708 lwz r22,saver22+4(r31) ; Restore R22
2709
2710 cmpwi cr1,r28,CPU_SUBTYPE_POWERPC_750 ; G3?
2711
2712 dcbz 0,r26 ; Clear and allocate the next savearea we use, in the off chance it is still in the cache when we next interrupt
2713
2714 andis. r23,r23,hi16(perfMonitor) ; Is the performance monitor enabled?
2715 lwz r23,saver23+4(r31) ; Restore R23
2716 cmpwi cr2,r28,CPU_SUBTYPE_POWERPC_7400 ; Yer standard G4?
2717 lwz r24,saver24+4(r31) ; Restore R24
2718 lwz r25,saver25+4(r31) ; Restore R25
2719 lwz r26,saver26+4(r31) ; Restore R26
2720 lwz r27,saver27+4(r31) ; Restore R27
2721
2722 beq+ noPerfMonRestore32 ; No perf monitor...
2723
2724 beq- cr1,perfMonRestore32_750 ; This is a G3...
2725 beq- cr2,perfMonRestore32_7400 ; Standard G4...
2726
2727 lwz r28,savepmc+16(r31)
2728 lwz r29,savepmc+20(r31)
2729 mtspr pmc5,r28 ; Restore PMC5
2730 mtspr pmc6,r29 ; Restore PMC6
2731
2732 perfMonRestore32_7400:
2733 lwz r28,savemmcr2+4(r31)
2734 mtspr mmcr2,r28 ; Restore MMCR2
2735
2736 perfMonRestore32_750:
2737 lwz r28,savepmc+0(r31)
2738 lwz r29,savepmc+4(r31)
2739 mtspr pmc1,r28 ; Restore PMC1
2740 mtspr pmc2,r29 ; Restore PMC2
2741 lwz r28,savepmc+8(r31)
2742 lwz r29,savepmc+12(r31)
2743 mtspr pmc3,r28 ; Restore PMC3
2744 mtspr pmc4,r29 ; Restore PMC4
2745 lwz r28,savemmcr1+4(r31)
2746 lwz r29,savemmcr0+4(r31)
2747 mtspr mmcr1,r28 ; Restore MMCR1
2748 mtspr mmcr0,r29 ; Restore MMCR0
2749
2750 noPerfMonRestore32:
2751 lwz r28,savecr(r31) ; Get CR to restore
2752 lwz r29,savexer+4(r31) ; Get XER to restore
2753 mtcr r28 ; Restore the CR
2754 lwz r28,savelr+4(r31) ; Get LR to restore
2755 mtxer r29 ; Restore the XER
2756 lwz r29,savectr+4(r31) ; Get the CTR to restore
2757 mtlr r28 ; Restore the LR
2758 lwz r28,saver30+4(r31) ; Get R30
2759 mtctr r29 ; Restore the CTR
2760 lwz r29,saver31+4(r31) ; Get R31
2761 mtsprg 2,r28 ; Save R30 for later
2762 lwz r28,saver28+4(r31) ; Restore R28
2763 mtsprg 3,r29 ; Save R31 for later
2764 lwz r29,saver29+4(r31) ; Restore R29
2765
2766 mfsprg r31,0 ; Get per_proc
2767 mfsprg r30,2 ; Restore R30
2768 lwz r31,pfAvailable(r31) ; Get the feature flags
2769 mtsprg 2,r31 ; Set the feature flags
2770 mfsprg r31,3 ; Restore R31
2771
2772 rfi ; Click heels three times and think very hard that there is no place like home...
2773
2774 .long 0 ; Leave this here
2775 .long 0
2776 .long 0
2777 .long 0
2778 .long 0
2779 .long 0
2780 .long 0
2781 .long 0
2782
2783
2784 ;
2785 ; This starts the 64-bit version
2786 ;
2787
2788 .align 7
2789
2790 eat64a: ld r30,quickfret(r29) ; Pick up the quick fret list, if any
2791
2792 mr. r3,r3 ; Should we hold off the quick release?
2793 la r21,saver0(r31) ; Point to the first thing we restore
2794 bne-- ernoqfre64 ; Hold off set, do not release just now...
2795
2796 erchkfre64: mr. r3,r30 ; Any savearea to quickly release?
2797 beq+ ernoqfre64 ; No quickfrets...
2798 ld r30,SAVprev(r30) ; Chain back now
2799
2800 bl EXT(save_ret_phys) ; Put it on the free list
2801
2802 std r30,quickfret(r29) ; Dequeue previous guy (really, it is ok to wait until after the release)
2803 b erchkfre64 ; Try the next one...
2804
2805 .align 7
2806
2807 ernoqfre64: dcbt 0,r21 ; Touch in the first thing we need
2808
2809 ;
2810 ; Here we release the savearea.
2811 ;
2812 ; Important!!!! The savearea is released before we are done with it. When the
2813 ; local free savearea list (anchored at lclfree) gets too long, save_ret_phys
2814 ; will trim the list, making the extra saveareas allocatable by another processor.
2815 ; The code in there must ALWAYS leave our savearea on the local list, otherwise
2816 ; we could be very, very unhappy. The code there always queues the "just released"
2817 ; savearea to the head of the local list. Then, if it needs to trim, it will
2818 ; start with the SECOND savearea, leaving ours intact.
2819 ;
2820 ;
2821
2822 li r3,lgKillResv ; Get spot to kill reservation
2823 stdcx. r3,0,r3 ; Blow away any reservations we hold
2824
2825 mr r3,r31 ; Get the exiting savearea in parm register
2826 bl EXT(save_ret_phys) ; Put it on the free list
2827
2828 lwz r3,savesrr1+4(r31) ; Pass in the MSR we will be going to
2829 bl EXT(switchSegs) ; Go handle the segment registers/STB
2830
2831 lhz r9,PP_CPU_FLAGS(r29) ; Get the processor flags
2832 ld r26,savesrr1(r31) ; Get destination MSR
2833 cmplw cr3,r14,r14 ; Set that we do not need to stop streams
2834 rlwinm r25,r26,27,22,22 ; Move PR bit to BE
2835
2836 rlwinm r9,r9,(((31-MSR_BE_BIT)+(traceBEb+16+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Set BE bit if special trace is on
2837 li r21,emfp0 ; Point to a workarea
2838 and r9,r9,r25 ; Clear BE if supervisor state
2839 or r26,r26,r9 ; Flip on the BE bit for special trace if needed
2840
2841 ld r25,savesrr0(r31) ; Get the SRR0 to use
2842 la r28,saver16(r31) ; Point to the 128-byte line with r16-r31
2843 dcbz128 r21,r29 ; Clear a work area
2844 ld r0,saver0(r31) ; Restore R0
2845 dcbt 0,r28 ; Touch in r16-r31
2846 ld r1,saver1(r31) ; Restore R1
2847 ld r2,saver2(r31) ; Restore R2
2848 ld r3,saver3(r31) ; Restore R3
2849 mtcrf 0x80,r27 ; Get facility availability flags (do not touch CR1-7)
2850 ld r4,saver4(r31) ; Restore R4
2851 mtsrr0 r25 ; Restore the SRR0 now
2852 ld r5,saver5(r31) ; Restore R5
2853 mtsrr1 r26 ; Restore the SRR1 now
2854 ld r6,saver6(r31) ; Restore R6
2855
2856 ld r7,saver7(r31) ; Restore R7
2857 ld r8,saver8(r31) ; Restore R8
2858 ld r9,saver9(r31) ; Restore R9
2859
2860 la r28,savevscr(r31) ; Point to the status area
2861
2862 ld r10,saver10(r31) ; Restore R10
2863 ld r11,saver11(r31) ; Restore R11
2864 ld r12,saver12(r31) ; Restore R12
2865 ld r13,saver13(r31) ; Restore R13
2866
2867 ld r26,next_savearea(r29) ; Get the exception save area
2868
2869 ld r14,saver14(r31) ; Restore R14
2870 ld r15,saver15(r31) ; Restore R15
2871 lwz r27,savevrsave(r31) ; Get the vrsave
2872
2873 bf-- pfAltivecb,noavec2s ; Skip if no VMX...
2874
2875 stvxl v0,r21,r29 ; Save a vector register
2876 lvxl v0,0,r28 ; Get the vector status
2877 mtvscr v0 ; Set the vector status
2878
2879 lvxl v0,r21,r29 ; Restore work vector register
2880
2881 noavec2s: mtspr vrsave,r27 ; Set the vrsave
2882
2883 lwz r28,saveexception(r31) ; Get exception type
2884 stfd f0,emfp0(r29) ; Save FP0
2885 lfd f0,savefpscrpad(r31) ; Get the fpscr
2886 mtfsf 0xFF,f0 ; Restore fpscr
2887 lfd f0,emfp0(r29) ; Restore the used register
2888 ld r16,saver16(r31) ; Restore R16
2889 lwz r30,spcFlags(r29) ; Get the special flags from per_proc
2890 ld r17,saver17(r31) ; Restore R17
2891 ld r18,saver18(r31) ; Restore R18
2892 cmplwi cr1,r28,T_RESET ; Are we returning from a reset?
2893 ld r19,saver19(r31) ; Restore R19
2894 ld r20,saver20(r31) ; Restore R20
2895 li r27,0 ; Get a zero
2896 ld r21,saver21(r31) ; Restore R21
2897 la r26,saver0(r26) ; Point to the first part of the next savearea
2898 andis. r30,r30,hi16(perfMonitor) ; Is the performance monitor enabled?
2899 ld r22,saver22(r31) ; Restore R22
2900 ld r23,saver23(r31) ; Restore R23
2901 bne++ cr1,er64rrst ; We are not returning from a reset...
2902 stw r27,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_TYPE)(br0) ; Allow resets again
2903
2904 er64rrst: ld r24,saver24(r31) ; Restore R24
2905
2906 dcbz128 0,r26 ; Clear and allocate the next savearea we use, in the off chance it is still in the cache when we next interrupt
2907
2908 ld r25,saver25(r31) ; Restore R25
2909 ld r26,saver26(r31) ; Restore R26
2910 ld r27,saver27(r31) ; Restore R27
2911
2912 beq++ noPerfMonRestore64 ; Nope...
2913
2914 lwz r28,savepmc+0(r31)
2915 lwz r29,savepmc+4(r31)
2916 mtspr pmc1_gp,r28 ; Restore PMC1
2917 mtspr pmc2_gp,r29 ; Restore PMC2
2918 lwz r28,savepmc+8(r31)
2919 lwz r29,savepmc+12(r31)
2920 mtspr pmc3_gp,r28 ; Restore PMC3
2921 mtspr pmc4_gp,r29 ; Restore PMC4
2922 lwz r28,savepmc+16(r31)
2923 lwz r29,savepmc+20(r31)
2924 mtspr pmc5_gp,r28 ; Restore PMC5
2925 mtspr pmc6_gp,r29 ; Restore PMC6
2926 lwz r28,savepmc+24(r31)
2927 lwz r29,savepmc+28(r31)
2928 mtspr pmc7_gp,r28 ; Restore PMC7
2929 mtspr pmc8_gp,r29 ; Restore PMC8
2930 ld r28,savemmcr1(r31)
2931 ld r29,savemmcr2(r31)
2932 mtspr mmcr1_gp,r28 ; Restore MMCR1
2933 mtspr mmcra_gp,r29 ; Restore MMCRA
2934 ld r28,savemmcr0(r31)
2935
2936 mtspr mmcr0_gp,r28 ; Restore MMCR0
2937
2938 noPerfMonRestore64:
2939 mfsprg r30,0 ; Get per_proc
2940 lwz r28,savecr(r31) ; Get CR to restore
2941 ld r29,savexer(r31) ; Get XER to restore
2942 mtcr r28 ; Restore the CR
2943 ld r28,savelr(r31) ; Get LR to restore
2944 mtxer r29 ; Restore the XER
2945 ld r29,savectr(r31) ; Get the CTR to restore
2946 mtlr r28 ; Restore the LR
2947 ld r28,saver30(r31) ; Get R30
2948 mtctr r29 ; Restore the CTR
2949 ld r29,saver31(r31) ; Get R31
2950 mtspr hsprg0,r28 ; Save R30 for later
2951 ld r28,saver28(r31) ; Restore R28
2952 mtsprg 3,r29 ; Save R31 for later
2953 ld r29,saver29(r31) ; Restore R29
2954
2955 lwz r31,pfAvailable(r30) ; Get the feature flags
2956 lwz r30,UAW(r30) ; Get the User Assist Word
2957 mtsprg 2,r31 ; Set the feature flags
2958 mfsprg r31,3 ; Restore R31
2959 mtsprg 3,r30 ; Set the UAW
2960 mfspr r30,hsprg0 ; Restore R30
2961
2962 rfid ; Click heels three times and think very hard that there is no place like home...
2963
2964
2965
2966 /*
2967 * exception_exit(savearea *)
2968 *
2969 *
2970 * ENTRY : IR and/or DR and/or interruptions can be on
2971 * R3 points to the virtual address of a savearea
2972 */
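;
; Illustrative sketch (comments only): the address conversion used below. SACvrswap is
; the XOR constant kept in the savearea block header, so the same value maps virtual to
; real here and real to virtual in PassUp/EatRupt. block_of() stands for rounding the
; address down to the block base, as the rlwinm below does.
;
;   savearea *sa_phys = (savearea *)((uintptr_t)sa_virt ^ block_of(sa_virt)->SACvrswap);
;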
2973
2974 .align 5
2975 .globl EXT(exception_exit)
2976
2977 LEXT(exception_exit)
2978
2979 mfsprg r29,2 ; Get feature flags
2980 mr r31,r3 ; Get the savearea in the right register
2981 mtcrf 0x04,r29 ; Set the features
2982 li r0,1 ; Get this just in case
2983 mtcrf 0x02,r29 ; Set the features
2984 lis r30,hi16(MASK(MSR_VEC)|MASK(MSR_FP)|MASK(MSR_ME)) ; Set up the MSR we will use throughout. Note that ME comes on here in case of MCK
2985 rlwinm r4,r3,0,0,19 ; Round down to savearea block base
2986 lis r1,hi16(SAVredrive) ; Get redrive request
2987 mfsprg r2,0 ; Get the per_proc block
2988 ori r30,r30,lo16(MASK(MSR_VEC)|MASK(MSR_FP)|MASK(MSR_ME)) ; Rest of MSR
2989 bt++ pf64Bitb,eeSixtyFour ; We are 64-bit...
2990
2991 lwz r4,SACvrswap+4(r4) ; Get the virtual to real translation
2992
2993 bt pfNoMSRirb,eeNoMSR ; No MSR...
2994
2995 mtmsr r30 ; Translation and all off
2996 isync ; Toss prefetch
2997 b eeNoMSRx
2998
2999 .align 5
3000
3001 eeSixtyFour:
3002 ld r4,SACvrswap(r4) ; Get the virtual to real translation
3003 rldimi r30,r0,63,MSR_SF_BIT ; Set SF bit (bit 0)
3004 mtmsrd r30 ; Set 64-bit mode, turn off EE, DR, and IR
3005 isync ; Toss prefetch
3006 b eeNoMSRx
3007
3008 .align 5
3009
3010 eeNoMSR: li r0,loadMSR ; Get the MSR setter SC
3011 mr r3,r30 ; Get new MSR
3012 sc ; Set it
3013
3014 eeNoMSRx: xor r31,r31,r4 ; Convert the savearea to physical addressing
3015 lwz r4,SAVflags(r31) ; Pick up the flags
3016 mr r13,r31 ; Put savearea here also
3017
3018 #if INSTRUMENT
3019 mfspr r5,pmc1 ; INSTRUMENT - saveinstr[8] - stamp exception exit
3020 stw r5,0x6100+(8*16)+0x0(0) ; INSTRUMENT - Save it
3021 mfspr r5,pmc2 ; INSTRUMENT - Get stamp
3022 stw r5,0x6100+(8*16)+0x4(0) ; INSTRUMENT - Save it
3023 mfspr r5,pmc3 ; INSTRUMENT - Get stamp
3024 stw r5,0x6100+(8*16)+0x8(0) ; INSTRUMENT - Save it
3025 mfspr r5,pmc4 ; INSTRUMENT - Get stamp
3026 stw r5,0x6100+(8*16)+0xC(0) ; INSTRUMENT - Save it
3027 #endif
3028
3029
3030 and. r0,r4,r1 ; Check if redrive requested
3031
3032 dcbt br0,r2 ; We will need this in just a sec
3033
3034 beq+ EatRupt ; No redrive, just exit...
3035
3036 lwz r11,saveexception(r13) ; Restore exception code
3037 b Redrive ; Redrive the exception...
3038
3039
3040
3041 .align 12 ; Force page alignment
3042
3043 .globl EXT(ExceptionVectorsEnd)
3044 EXT(ExceptionVectorsEnd): /* Used if relocating the exception vectors */
3045
3046
3047
3048
3049 ;
3050 ; Here is where we keep the low memory globals
3051 ;
3052
3053 . = 0x5000
3054 .globl EXT(lowGlo)
3055
3056 EXT(lowGlo):
3057
3058 .ascii "Hagfish " ; 5000 Unique eyecatcher
3059 .long 0 ; 5008 Zero
3060 .long 0 ; 500C Zero cont...
3061 .long EXT(per_proc_info) ; 5010 pointer to per_procs
3062 .long 0 ; 5014 reserved
3063 .long 0 ; 5018 reserved
3064 .long 0 ; 501C reserved
3065 .long 0 ; 5020 reserved
3066 .long 0 ; 5024 reserved
3067 .long 0 ; 5028 reserved
3068 .long 0 ; 502C reserved
3069 .long 0 ; 5030 reserved
3070 .long 0 ; 5034 reserved
3071 .long 0 ; 5038 reserved
3072 .long 0 ; 503C reserved
3073 .long 0 ; 5040 reserved
3074 .long 0 ; 5044 reserved
3075 .long 0 ; 5048 reserved
3076 .long 0 ; 504C reserved
3077 .long 0 ; 5050 reserved
3078 .long 0 ; 5054 reserved
3079 .long 0 ; 5058 reserved
3080 .long 0 ; 505C reserved
3081 .long 0 ; 5060 reserved
3082 .long 0 ; 5064 reserved
3083 .long 0 ; 5068 reserved
3084 .long 0 ; 506C reserved
3085 .long 0 ; 5070 reserved
3086 .long 0 ; 5074 reserved
3087 .long 0 ; 5078 reserved
3088 .long 0 ; 507C reserved
3089
3090 .globl EXT(trcWork)
3091 EXT(trcWork):
3092 .long 0 ; 5080 The next trace entry to use
3093 #if DEBUG
3094 .long 0xFFFFFFFF ; 5084 All enabled
3095 #else
3096 .long 0x00000000 ; 5084 All disabled on non-debug systems
3097 #endif
3098 .long 0 ; 5088 Start of the trace table
3099 .long 0 ; 508C End (wrap point) of the trace
3100 .long 0 ; 5090 Saved mask while in debugger
3101 .long 0 ; 5094 Size of trace table (1 - 256 pages)
3102 .long 0 ; 5098 traceGas[0]
3103 .long 0 ; 509C traceGas[1]
3104
3105 .long 0 ; 50A0 reserved
3106 .long 0 ; 50A4 reserved
3107 .long 0 ; 50A8 reserved
3108 .long 0 ; 50AC reserved
3109 .long 0 ; 50B0 reserved
3110 .long 0 ; 50B4 reserved
3111 .long 0 ; 50B8 reserved
3112 .long 0 ; 50BC reserved
3113 .long 0 ; 50C0 reserved
3114 .long 0 ; 50C4 reserved
3115 .long 0 ; 50C8 reserved
3116 .long 0 ; 50CC reserved
3117 .long 0 ; 50D0 reserved
3118 .long 0 ; 50D4 reserved
3119 .long 0 ; 50D8 reserved
3120 .long 0 ; 50DC reserved
3121 .long 0 ; 50E0 reserved
3122 .long 0 ; 50E4 reserved
3123 .long 0 ; 50E8 reserved
3124 .long 0 ; 50EC reserved
3125 .long 0 ; 50F0 reserved
3126 .long 0 ; 50F4 reserved
3127 .long 0 ; 50F8 reserved
3128 .long 0 ; 50FC reserved
3129
3130 .globl EXT(saveanchor)
3131
3132 EXT(saveanchor): ; 5100 saveanchor
3133 .set .,.+SVsize
3134
3135 .long 0 ; 5140 reserved
3136 .long 0 ; 5144 reserved
3137 .long 0 ; 5148 reserved
3138 .long 0 ; 514C reserved
3139 .long 0 ; 5150 reserved
3140 .long 0 ; 5154 reserved
3141 .long 0 ; 5158 reserved
3142 .long 0 ; 515C reserved
3143 .long 0 ; 5160 reserved
3144 .long 0 ; 5164 reserved
3145 .long 0 ; 5168 reserved
3146 .long 0 ; 516C reserved
3147 .long 0 ; 5170 reserved
3148 .long 0 ; 5174 reserved
3149 .long 0 ; 5178 reserved
3150 .long 0 ; 517C reserved
3151
3152 .long 0 ; 5180 tlbieLock
3153
3154 .long 0 ; 5184 reserved
3155 .long 0 ; 5188 reserved
3156 .long 0 ; 518C reserved
3157 .long 0 ; 5190 reserved
3158 .long 0 ; 5194 reserved
3159 .long 0 ; 5198 reserved
3160 .long 0 ; 519C reserved
3161 .long 0 ; 51A0 reserved
3162 .long 0 ; 51A4 reserved
3163 .long 0 ; 51A8 reserved
3164 .long 0 ; 51AC reserved
3165 .long 0 ; 51B0 reserved
3166 .long 0 ; 51B4 reserved
3167 .long 0 ; 51B8 reserved
3168 .long 0 ; 51BC reserved
3169 .long 0 ; 51C0 reserved
3170 .long 0 ; 51C4 reserved
3171 .long 0 ; 51C8 reserved
3172 .long 0 ; 51CC reserved
3173 .long 0 ; 51D0 reserved
3174 .long 0 ; 51D4 reserved
3175 .long 0 ; 51D8 reserved
3176 .long 0 ; 51DC reserved
3177 .long 0 ; 51E0 reserved
3178 .long 0 ; 51E4 reserved
3179 .long 0 ; 51E8 reserved
3180 .long 0 ; 51EC reserved
3181 .long 0 ; 51F0 reserved
3182 .long 0 ; 51F4 reserved
3183 .long 0 ; 51F8 reserved
3184 .long 0 ; 51FC reserved
3185
3186 .globl EXT(dgWork)
3187
3188 EXT(dgWork):
3189
3190 .long 0 ; 5200 dgLock
3191 .long 0 ; 5204 dgFlags
3192 .long 0 ; 5208 dgMisc0
3193 .long 0 ; 520C dgMisc1
3194 .long 0 ; 5210 dgMisc2
3195 .long 0 ; 5214 dgMisc3
3196 .long 0 ; 5218 dgMisc4
3197 .long 0 ; 521C dgMisc5
3198
3199 .long 0 ; 5220 reserved
3200 .long 0 ; 5224 reserved
3201 .long 0 ; 5228 reserved
3202 .long 0 ; 522C reserved
3203 .long 0 ; 5230 reserved
3204 .long 0 ; 5234 reserved
3205 .long 0 ; 5238 reserved
3206 .long 0 ; 523C reserved
3207 .long 0 ; 5240 reserved
3208 .long 0 ; 5244 reserved
3209 .long 0 ; 5248 reserved
3210 .long 0 ; 524C reserved
3211 .long 0 ; 5250 reserved
3212 .long 0 ; 5254 reserved
3213 .long 0 ; 5258 reserved
3214 .long 0 ; 525C reserved
3215 .long 0 ; 5260 reserved
3216 .long 0 ; 5264 reserved
3217 .long 0 ; 5268 reserved
3218 .long 0 ; 526C reserved
3219 .long 0 ; 5270 reserved
3220 .long 0 ; 5274 reserved
3221 .long 0 ; 5278 reserved
3222 .long 0 ; 527C reserved
3223
3224 .long 0 ; 5280 reserved
3225 .long 0 ; 5284 reserved
3226 .long 0 ; 5288 reserved
3227 .long 0 ; 528C reserved
3228 .long 0 ; 5290 reserved
3229 .long 0 ; 5294 reserved
3230 .long 0 ; 5298 reserved
3231 .long 0 ; 529C reserved
3232 .long 0 ; 52A0 reserved
3233 .long 0 ; 52A4 reserved
3234 .long 0 ; 52A8 reserved
3235 .long 0 ; 52AC reserved
3236 .long 0 ; 52B0 reserved
3237 .long 0 ; 52B4 reserved
3238 .long 0 ; 52B8 reserved
3239 .long 0 ; 52BC reserved
3240 .long 0 ; 52C0 reserved
3241 .long 0 ; 52C4 reserved
3242 .long 0 ; 52C8 reserved
3243 .long 0 ; 52CC reserved
3244 .long 0 ; 52D0 reserved
3245 .long 0 ; 52D4 reserved
3246 .long 0 ; 52D8 reserved
3247 .long 0 ; 52DC reserved
3248 .long 0 ; 52E0 reserved
3249 .long 0 ; 52E4 reserved
3250 .long 0 ; 52E8 reserved
3251 .long 0 ; 52EC reserved
3252 .long 0 ; 52F0 reserved
3253 .long 0 ; 52F4 reserved
3254 .long 0 ; 52F8 reserved
3255 .long 0 ; 52FC reserved
3256
3257 .globl EXT(killresv)
3258 EXT(killresv):
3259
3260 .long 0 ; 5300 Used to kill reservations
3261 .long 0 ; 5304 Used to kill reservations
3262 .long 0 ; 5308 Used to kill reservations
3263 .long 0 ; 530C Used to kill reservations
3264 .long 0 ; 5310 Used to kill reservations
3265 .long 0 ; 5314 Used to kill reservations
3266 .long 0 ; 5318 Used to kill reservations
3267 .long 0 ; 531C Used to kill reservations
3268 .long 0 ; 5320 Used to kill reservations
3269 .long 0 ; 5324 Used to kill reservations
3270 .long 0 ; 5328 Used to kill reservations
3271 .long 0 ; 532C Used to kill reservations
3272 .long 0 ; 5330 Used to kill reservations
3273 .long 0 ; 5334 Used to kill reservations
3274 .long 0 ; 5338 Used to kill reservations
3275 .long 0 ; 533C Used to kill reservations
3276 .long 0 ; 5340 Used to kill reservations
3277 .long 0 ; 5344 Used to kill reservations
3278 .long 0 ; 5348 Used to kill reservations
3279 .long 0 ; 534C Used to kill reservations
3280 .long 0 ; 5350 Used to kill reservations
3281 .long 0 ; 5354 Used to kill reservations
3282 .long 0 ; 5358 Used to kill reservations
3283 .long 0 ; 535C Used to kill reservations
3284 .long 0 ; 5360 Used to kill reservations
3285 .long 0 ; 5364 Used to kill reservations
3286 .long 0 ; 5368 Used to kill reservations
3287 .long 0 ; 536C Used to kill reservations
3288 .long 0 ; 5370 Used to kill reservations
3289 .long 0 ; 5374 Used to kill reservations
3290 .long 0 ; 5378 Used to kill reservations
3291 .long 0 ; 537C Used to kill reservations
3292
3293 .long 0 ; 5380 reserved
3294 .long 0 ; 5384 reserved
3295 .long 0 ; 5388 reserved
3296 .long 0 ; 538C reserved
3297 .long 0 ; 5390 reserved
3298 .long 0 ; 5394 reserved
3299 .long 0 ; 5398 reserved
3300 .long 0 ; 539C reserved
3301 .long 0 ; 53A0 reserved
3302 .long 0 ; 53A4 reserved
3303 .long 0 ; 53A8 reserved
3304 .long 0 ; 53AC reserved
3305 .long 0 ; 53B0 reserved
3306 .long 0 ; 53B4 reserved
3307 .long 0 ; 53B8 reserved
3308 .long 0 ; 53BC reserved
3309 .long 0 ; 53C0 reserved
3310 .long 0 ; 53C4 reserved
3311 .long 0 ; 53C8 reserved
3312 .long 0 ; 53CC reserved
3313 .long 0 ; 53D0 reserved
3314 .long 0 ; 53D4 reserved
3315 .long 0 ; 53D8 reserved
3316 .long 0 ; 53DC reserved
3317 .long 0 ; 53E0 reserved
3318 .long 0 ; 53E4 reserved
3319 .long 0 ; 53E8 reserved
3320 .long 0 ; 53EC reserved
3321 .long 0 ; 53F0 reserved
3322 .long 0 ; 53F4 reserved
3323 .long 0 ; 53F8 reserved
3324 .long 0 ; 53FC reserved
3325
3326
3327 ;
3328 ; The "shared page" is used for low-level debugging
3329 ;
3330
3331 . = 0x6000
3332 .globl EXT(sharedPage)
3333
3334 EXT(sharedPage): ; Per processor data area
3335 .long 0xC24BC195 ; Comm Area validity value
3336 .long 0x87859393 ; Comm Area validity value
3337 .long 0xE681A2C8 ; Comm Area validity value
3338 .long 0x8599855A ; Comm Area validity value
3339 .long 0xD74BD296 ; Comm Area validity value
3340 .long 0x8388E681 ; Comm Area validity value
3341 .long 0xA2C88599 ; Comm Area validity value
3342 .short 0x855A ; Comm Area validity value
3343 .short 1 ; Comm Area version number
3344 .fill 1016*4,1,0 ; (filled with 0s)
3345
3346 .data
3347 .align ALIGN
3348 .globl EXT(exception_end)
3349 EXT(exception_end):
3350 .long EXT(ExceptionVectorsEnd) -EXT(ExceptionVectorsStart) /* phys fn */
3351
3352
3353