]> git.saurik.com Git - apple/xnu.git/blob - osfmk/ppc/lowmem_vectors.s
xnu-1228.3.13.tar.gz
[apple/xnu.git] / osfmk / ppc / lowmem_vectors.s
1 /*
2 * Copyright (c) 2000-2007 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31
32 #include <assym.s>
33 #include <debug.h>
34 #include <db_machine_commands.h>
35
36 #include <mach_debug.h>
37 #include <ppc/asm.h>
38 #include <ppc/proc_reg.h>
39 #include <ppc/exception.h>
40 #include <ppc/Performance.h>
41 #include <ppc/savearea.h>
42 #include <mach/ppc/vm_param.h>
43
44 #define ESPDEBUG 0
45 #define INSTRUMENT 0
46
; Bit numbers used as flags by the exception-dispatch code.  They are not
; referenced in this chunk -- presumably CR bit positions; confirm against
; the rest of the file.
47 #define featAltivec 29
48 #define wasNapping 30
49
; All hardware exception vectors live in their own section so they can be
; placed at (or copied to) physical address 0.
50 #define VECTOR_SEGMENT .section __VECTORS, __interrupts
51
52 VECTOR_SEGMENT
53
; lowGlo marks the base of the low-memory globals area; it coincides with
; the start of the vectors.
54 .globl EXT(lowGlo)
55 EXT(lowGlo):
56
57 .globl EXT(ExceptionVectorsStart)
58
59 EXT(ExceptionVectorsStart): /* Used if relocating the exception vectors */
60 baseR: /* Used so we have more readable code */
61
62 ;
63 ; Handle system reset.
64 ; We do not ever expect a hard reset so we do not actually check.
65 ; When we come here, we check for a RESET_HANDLER_START (which means we are
66 waking up from sleep), a RESET_HANDLER_BUPOR (which is used for bring up
67 ; when starting directly from a POR), and RESET_HANDLER_IGNORE (which means
68 ; ignore the interrupt).
69 ;
70 ; Some machines (so far, 32-bit guys) will always ignore a non-START interrupt.
71 ; The ones who do take it, check if the interrupt is to be ignored. This is
72 ; always the case until the previous reset is handled (i.e., we have exited
73 ; from the debugger).
74 ;
; Three-word reset-handler descriptor at 0xF0, accessed below via the
; RESETHANDLER_TYPE / RESETHANDLER_CALL / RESETHANDLER_ARG offsets.
75 . = 0xf0
76 .globl EXT(ResetHandler)
77 EXT(ResetHandler):
78 .long 0x0
79 .long 0x0
80 .long 0x0
81
; System reset vector (0x100).  Convention throughout these vectors:
; SPRG2/SPRG3 are scratch saves for R13/R11.  The lo16(...)(br0) loads
; address the descriptor above directly in low physical memory.
82 . = 0x100
83 .L_handler100:
84 mtsprg 2,r13 /* Save R13 */
85 mtsprg 3,r11 /* Save R11 */
86 lwz r13,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_TYPE)(br0) ; Get reset type
87 mfcr r11 ; Save the CR
88 cmpi cr0,r13,RESET_HANDLER_START ; Waking up from sleep?
89 bne resetexc ; No, check the other reset types...
90
91 li r11,RESET_HANDLER_NULL ; Get the "handled" marker
92 stw r11,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_TYPE)(br0) ; Clear reset type
93
94 lwz r4,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_CALL)(br0) ; Get wakeup routine address
95 lwz r3,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_ARG)(br0) ; Get its argument into R3
96 mtlr r4 ; Set up the wakeup routine
97 blr ; Go handle the wakeup (no return here)
98
99 resetexc: cmplwi r13,RESET_HANDLER_BUPOR ; Special bring up POR sequence?
100 bne resetexc2 ; No...
101 lis r4,hi16(EXT(resetPOR)) ; Get POR code
102 ori r4,r4,lo16(EXT(resetPOR)) ; The rest
103 mtlr r4 ; Set it
104 blr ; Jump to it....
105
106 resetexc2: cmplwi cr1,r13,RESET_HANDLER_IGNORE ; Are we ignoring these? (Software debounce)
107
108 mfsprg r13,0 ; Get per_proc
109 lwz r13,pfAvailable(r13) ; Get the features
110 rlwinm. r13,r13,0,pf64Bitb,pf64Bitb ; Is this a 64-bit machine?
; cr1_eq = (32-bit machine) OR (explicitly ignoring):  32-bit machines
; always ignore a non-START reset; 64-bit ones only while debouncing.
111 cror cr1_eq,cr0_eq,cr1_eq ; See if we want to take this
112 bne-- cr1,rxCont ; Yes, continue...
113 bne-- rxIg64 ; 64-bit path...
114
; 32-bit ignore path: restore state and resume, leaving sprg2 holding the
; feature flags per the inter-interrupt convention.
115 mtcr r11 ; Restore the CR
116 mfsprg r13,2 ; Restore R13
117 mfsprg r11,0 ; Get per_proc
118 lwz r11,pfAvailable(r11) ; Get the features
119 mtsprg 2,r11 ; Restore sprg2
120 mfsprg r11,3 ; Restore R11
121 rfi ; Return and ignore the reset
122
; 64-bit ignore path: additionally reload the User Assist Word into sprg3
; and return with rfid.
123 rxIg64: mtcr r11 ; Restore the CR
124 mfsprg r11,0 ; Get per_proc
125 mtspr hsprg0,r14 ; Save a register
126 ld r14,UAW(r11) ; Get the User Assist DoubleWord
127 mfsprg r13,2 ; Restore R13
128 lwz r11,pfAvailable(r11) ; Get the features
129 mtsprg 2,r11 ; Restore sprg2
130 mfsprg r11,3 ; Restore R11
131 mtsprg 3,r14 ; Set the UAW in sprg3
132 mfspr r14,hsprg0 ; Restore R14
133 rfid ; Return and ignore the reset
134
; Take the reset: start debouncing further resets, then enter the common
; exception path with T_RESET.
135 rxCont: mtcr r11 ; Restore the CR
136 li r11,RESET_HANDLER_IGNORE ; Get set to ignore
137 stw r11,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_TYPE)(br0) ; Start ignoring these
138 mfsprg r13,1 /* Get the exception save area */
139 li r11,T_RESET /* Set 'rupt code */
140 b .L_exception_entry /* Join common... */
141
142 /*
143 * Machine check
144 */
145
; Machine check vector (0x200).  The branch at extPatchMCK is patched to a
; nop at boot on 64-bit (970) processors so they fall into the 970-specific
; sequence below; 32-bit processors branch around it to h200aaa.
146 . = 0x200
147 .L_handler200:
148 mtsprg 2,r13 ; Save R13
149 mtsprg 3,r11 ; Save R11
150
151 .globl EXT(extPatchMCK)
152 LEXT(extPatchMCK) ; This is patched to a nop for 64-bit
153 b h200aaa ; Skip 64-bit code...
154
155 ;
156 ; Fall through here for 970 MCKs.
157 ;
; NOTE(review): the "?" comments below are in the original source (the
; 970-specific details were redacted).  The sequence toggles HID4 bits
; around sync/isync barriers and invalidates the SLB entry for an
; intentionally unlikely ESID (0xE0000000_00000000 >> 1) before joining the
; normal machine-check path -- confirm exact bit meanings against 970
; processor documentation before modifying.
158
159 li r11,1 ; ?
160 sldi r11,r11,32+3 ; ?
161 mfspr r13,hid4 ; ?
162 or r11,r11,r13 ; ?
163 sync
164 mtspr hid4,r11 ; ?
165 isync
166 li r11,1 ; ?
167 sldi r11,r11,32+8 ; ?
168 andc r13,r13,r11 ; ?
169 lis r11,0xE000 ; Get the unlikeliest ESID possible
170 sync
171 mtspr hid4,r13 ; ?
172 isync ; ?
173
174 srdi r11,r11,1 ; ?
175 slbie r11 ; ?
176 sync
177 isync
178
179 li r11,T_MACHINE_CHECK ; Set rupt code
180 b .L_exception_entry ; Join common...
181
182 ;
183 ; Preliminary checking of other MCKs
184 ;
185
; Preliminary triage of non-970 machine checks.  If SRR1 flags a data-cache
; machine check (dcmck), force a hardware flush of the L1 data cache via
; MSSCR0, count the event in per_proc, and resume; anything else goes to
; the common exception path.  (The "?" comments are original/redacted.)
186 h200aaa: mfsrr1 r11 ; Get the SRR1
187 mfcr r13 ; Save the CR
188
189 rlwinm. r11,r11,0,dcmck,dcmck ; ?
190 beq+ notDCache ; ?
191
192 sync
193 mfspr r11,msscr0 ; ?
194 dssall ; ?
195 sync
196 isync
197
; Request the L1 data-cache hardware flush (dl1hwf), then spin until the
; hardware clears the busy bit.
198 oris r11,r11,hi16(dl1hwfm) ; ?
199 mtspr msscr0,r11 ; ?
200
201 rstbsy: mfspr r11,msscr0 ; ?
202
203 rlwinm. r11,r11,0,dl1hwf,dl1hwf ; ?
204 bne rstbsy ; ?
205
206 sync ; ?
207
; Recovered: bump the machine-check counter and return to the interrupted
; context, leaving the feature flags in sprg2 per convention.
208 mfsprg r11,0 ; Get the per_proc
209 mtcrf 255,r13 ; Restore CRs
210 lwz r13,hwMachineChecks(r11) ; Get old count
211 addi r13,r13,1 ; Count this one
212 stw r13,hwMachineChecks(r11) ; Set new count
213 lwz r11,pfAvailable(r11) ; Get the feature flags
214 mfsprg r13,2 ; Restore R13
215 mtsprg 2,r11 ; Set the feature flags
216 mfsprg r11,3 ; Restore R11
217 rfi ; Return
218
219 notDCache: mtcrf 255,r13 ; Restore CRs
220 li r11,T_MACHINE_CHECK ; Set rupt code
221 b .L_exception_entry ; Join common...
222
223
; -------------------------------------------------------------------------
; Vectors 0x300 - 0xB00.  Each follows the same pattern: save R13/R11 in
; SPRG2/SPRG3, load the interrupt code (T_xxx) into R11, and join the
; common exception path at .L_exception_entry.
; -------------------------------------------------------------------------
224 /*
225 * Data access - page fault, invalid memory rights for operation
226 */
227
228 . = 0x300
229 .L_handler300:
230 mtsprg 2,r13 /* Save R13 */
231 mtsprg 3,r11 /* Save R11 */
232 li r11,T_DATA_ACCESS /* Set 'rupt code */
233 b .L_exception_entry /* Join common... */
234
235
236 /*
237 * Data segment
238 */
239
240 . = 0x380
241 .L_handler380:
242 mtsprg 2,r13 ; Save R13
243 mtsprg 3,r11 ; Save R11
244 li r11,T_DATA_SEGMENT ; Set rupt code
245 b .L_exception_entry ; Join common...
246
247 /*
248 * Instruction access - as for data access
249 */
250
251 . = 0x400
252 .L_handler400:
253 mtsprg 2,r13 ; Save R13
254 mtsprg 3,r11 ; Save R11
255 li r11,T_INSTRUCTION_ACCESS ; Set rupt code
256 b .L_exception_entry ; Join common...
257
258 /*
259 * Instruction segment
260 */
261
262 . = 0x480
263 .L_handler480:
264 mtsprg 2,r13 ; Save R13
265 mtsprg 3,r11 ; Save R11
266 li r11,T_INSTRUCTION_SEGMENT ; Set rupt code
267 b .L_exception_entry ; Join common...
268
269 /*
270 * External interrupt
271 */
272
273 . = 0x500
274 .L_handler500:
275 mtsprg 2,r13 ; Save R13
276 mtsprg 3,r11 ; Save R11
277 li r11,T_INTERRUPT ; Set rupt code
278 b .L_exception_entry ; Join common...
279
280 /*
281 * Alignment - many reasons
282 */
283
284 . = 0x600
285 .L_handler600:
286 mtsprg 2,r13 /* Save R13 */
287 mtsprg 3,r11 /* Save R11 */
288 li r11,T_ALIGNMENT|T_FAM /* Set 'rupt code */
289 b .L_exception_entry /* Join common... */
290
291 /*
292 * Program - floating point exception, illegal inst, priv inst, user trap
293 */
294
295 . = 0x700
296 .L_handler700:
297 mtsprg 2,r13 ; Save R13
298 mtsprg 3,r11 ; Save R11
299 li r11,T_PROGRAM|T_FAM ; Set program interruption code
300 b .L_exception_entry ; Join common...
301
302 /*
303 * Floating point disabled
304 */
305
306 . = 0x800
307 .L_handler800:
308 mtsprg 2,r13 /* Save R13 */
309 mtsprg 3,r11 /* Save R11 */
310 li r11,T_FP_UNAVAILABLE /* Set 'rupt code */
311 b .L_exception_entry /* Join common... */
312
313
314 /*
315 * Decrementer - DEC register has passed zero.
316 */
317
318 . = 0x900
319 .L_handler900:
320 mtsprg 2,r13 /* Save R13 */
321 mtsprg 3,r11 /* Save R11 */
322 li r11,T_DECREMENTER /* Set 'rupt code */
323 b .L_exception_entry /* Join common... */
324
325 /*
326 * I/O controller interface error - MACH does not use this
327 */
328
329 . = 0xA00
330 .L_handlerA00:
331 mtsprg 2,r13 /* Save R13 */
332 mtsprg 3,r11 /* Save R11 */
333 li r11,T_IO_ERROR /* Set 'rupt code */
334 b .L_exception_entry /* Join common... */
335
336 /*
337 * Reserved
338 */
339
340 . = 0xB00
341 .L_handlerB00:
342 mtsprg 2,r13 /* Save R13 */
343 mtsprg 3,r11 /* Save R11 */
344 li r11,T_RESERVED /* Set 'rupt code */
345 b .L_exception_entry /* Join common... */
346
347
; System Calls (sc instruction)
;
; The syscall number is in r0. All we do here is munge the number into an
; 8-bit index into the "scTable", and dispatch on it to handle the Ultra
; Fast Traps (UFTs.) The index is:
;
; 0x80 - set if syscall number is 0x80000000 (CutTrace)
; 0x40 - set if syscall number is 0x00006004
; 0x20 - set if upper 29 bits of syscall number are 0xFFFFFFF8
; 0x10 - set if upper 29 bits of syscall number are 0x00007FF0
; 0x0E - low three bits of syscall number
; 0x01 - zero, as scTable is an array of shorts
361 . = 0xC00
362 .L_handlerC00:
363 mtsprg 3,r11 ; Save R11
364 mtsprg 2,r13 ; Save R13
; Build the scTable index branchlessly.  cntlzw yields 32 (bit 0x20) only
; when its operand is zero, which is how each "is it exactly X?" test is
; turned into a single bit.
365 rlwinm r11,r0,0,0xFFFFFFF8 ; mask off low 3 bits of syscall number
366 xori r13,r11,0x7FF0 ; start to check for the 0x7FFx traps
367 addi r11,r11,8 ; make a 0 iff this is a 0xFFFFFFF8 trap
368 cntlzw r13,r13 ; set bit 0x20 iff a 0x7FFx trap
369 cntlzw r11,r11 ; set bit 0x20 iff a 0xFFFFFFF8 trap
370 xoris r0,r0,0x8000 ; Flip bit to make 0 iff 0x80000000
371 rlwimi r11,r13,31,0x10 ; move 0x7FFx bit into position
372 cntlzw r13,r0 ; Set bit 0x20 iff 0x80000000
373 xoris r0,r0,0x8000 ; Flip bit to restore R0
374 rlwimi r11,r13,2,0x80 ; Set bit 0x80 iff CutTrace
375 xori r13,r0,0x6004 ; start to check for 0x6004
376 rlwimi r11,r0,1,0xE ; move in low 3 bits of syscall number
377 cntlzw r13,r13 ; set bit 0x20 iff 0x6004
378 rlwinm r11,r11,0,0,30 ; clear out bit 31 (index must be even: table of shorts)
379 rlwimi r11,r13,1,0x40 ; move 0x6004 bit into position
380 lhz r11,lo16(scTable)(r11) ; get branch address from sc table
381 mfctr r13 ; save callers ctr in r13
382 mtctr r11 ; set up branch to syscall handler
383 mfsprg r11,0 ; get per_proc, which most UFTs use
384 bctr ; dispatch (r11 in sprg3, r13 in sprg2, ctr in r13, per_proc in r11)
385
386 /*
387 * Trace - generated by single stepping
388 * performance monitor BE branch enable tracing/logging
389 * is also done here now. while this is permanently in the
390 * system the impact is completely unnoticeable as this code is
391 * only executed when (a) a single step or branch exception is
392 * hit, (b) in the single step debugger case there is so much
393 * overhead already the few extra instructions for testing for BE
394 * are not even noticeable
395 *
396 * Note that this trace is available only to user state so we do not
397 * need to set sprg2 before returning.
398 */
399
400 . = 0xD00
401 .L_handlerD00:
402 mtsprg 3,r11 ; Save R11
; NOTE(review): the feature flags loaded here are immediately overwritten
; by the li below and never stored -- this load appears vestigial; confirm
; against the full source history before removing.
403 mfsprg r11,2 ; Get the feature flags
404 mtsprg 2,r13 ; Save R13
405
406 li r11,T_TRACE|T_FAM ; Set interrupt code
407 b .L_exception_entry ; Join common...
408
; -------------------------------------------------------------------------
; Vectors 0xE00 - 0x2000.  Same pattern as the earlier vectors: save
; R13/R11 in SPRG2/SPRG3, load the interrupt code into R11, and join the
; common exception path.
; -------------------------------------------------------------------------
409 /*
410 * Floating point assist
411 */
412
413 . = 0xE00
414 .L_handlerE00:
415 mtsprg 2,r13 /* Save R13 */
416 mtsprg 3,r11 /* Save R11 */
417 li r11,T_FP_ASSIST /* Set 'rupt code */
418 b .L_exception_entry /* Join common... */
419
420
421 /*
422 * Performance monitor interruption
423 */
424
425 . = 0xF00
426 PMIhandler:
427 mtsprg 2,r13 /* Save R13 */
428 mtsprg 3,r11 /* Save R11 */
429 li r11,T_PERF_MON /* Set 'rupt code */
430 b .L_exception_entry /* Join common... */
431
432
433 /*
434 * VMX exception
435 */
436
437 . = 0xF20
438 VMXhandler:
439 mtsprg 2,r13 /* Save R13 */
440 mtsprg 3,r11 /* Save R11 */
441 li r11,T_VMX /* Set 'rupt code */
442 b .L_exception_entry /* Join common... */
443
444
445
446 ;
447 ; Instruction translation miss exception - not supported
448 ;
449
450 . = 0x1000
451 .L_handler1000:
452 mtsprg 2,r13 ; Save R13
453 mtsprg 3,r11 ; Save R11
454 li r11,T_INVALID_EXCP0 ; Set rupt code
455 b .L_exception_entry ; Join common...
456
457
458
459 ;
460 ; Data load translation miss exception - not supported
461 ;
462
463 . = 0x1100
464 .L_handler1100:
465 mtsprg 2,r13 ; Save R13
466 mtsprg 3,r11 ; Save R11
467 li r11,T_INVALID_EXCP1 ; Set rupt code
468 b .L_exception_entry ; Join common...
469
470
471
472 ;
473 ; Data store translation miss exception - not supported
474 ;
475
476 . = 0x1200
477 .L_handler1200:
478 mtsprg 2,r13 ; Save R13
479 mtsprg 3,r11 ; Save R11
480 li r11,T_INVALID_EXCP2 ; Set rupt code
481 b .L_exception_entry ; Join common...
482
483
484 /*
485 * Instruction address breakpoint
486 */
487
488 . = 0x1300
489 .L_handler1300:
490 mtsprg 2,r13 /* Save R13 */
491 mtsprg 3,r11 /* Save R11 */
492 li r11,T_INSTRUCTION_BKPT /* Set 'rupt code */
493 b .L_exception_entry /* Join common... */
494
495 /*
496 * System management interrupt
497 */
498
499 . = 0x1400
500 .L_handler1400:
501 mtsprg 2,r13 /* Save R13 */
502 mtsprg 3,r11 /* Save R11 */
503 li r11,T_SYSTEM_MANAGEMENT /* Set 'rupt code */
504 b .L_exception_entry /* Join common... */
505
506
507 /*
508 * Soft Patch
509 */
510
511 . = 0x1500
512 .L_handler1500:
513 mtsprg 2,r13 /* Save R13 */
514 mtsprg 3,r11 /* Save R11 */
515 li r11,T_SOFT_PATCH /* Set 'rupt code */
516 b .L_exception_entry /* Join common... */
517
518 ;
519 ; Altivec Java Mode Assist interrupt or Maintenance interrupt
520 ;
521
522 . = 0x1600
523 .L_handler1600:
524 mtsprg 2,r13 /* Save R13 */
525 mtsprg 3,r11 /* Save R11 */
526 li r11,T_ALTIVEC_ASSIST /* Set 'rupt code */
527 b .L_exception_entry /* Join common... */
528
529 ;
530 ; Altivec Java Mode Assist interrupt or Thermal interruption
531 ;
532
533 . = 0x1700
534 .L_handler1700:
535 mtsprg 2,r13 /* Save R13 */
536 mtsprg 3,r11 /* Save R11 */
537 li r11,T_THERMAL /* Set 'rupt code */
538 b .L_exception_entry /* Join common... */
539
540 ;
541 ; Thermal interruption - 64-bit
542 ;
543
544 . = 0x1800
545 .L_handler1800:
546 mtsprg 2,r13 /* Save R13 */
547 mtsprg 3,r11 /* Save R11 */
548 li r11,T_ARCHDEP0 /* Set 'rupt code */
549 b .L_exception_entry /* Join common... */
550
551 /*
552 * There is now a large gap of reserved traps
553 */
554
555 /*
556 * Instrumentation interruption
557 */
558
559 . = 0x2000
560 .L_handler2000:
561 mtsprg 2,r13 /* Save R13 */
562 mtsprg 3,r11 /* Save R11 */
563 li r11,T_INSTRUMENTATION /* Set 'rupt code */
564 b .L_exception_entry /* Join common... */
565
566
567
; Exported data word holding the physical offset of the common exception
; entry point, relative to the vector base.
568 .data
569 .align ALIGN
570 .globl EXT(exception_entry)
571 EXT(exception_entry):
572 .long .L_exception_entry-EXT(ExceptionVectorsStart) /* phys addr of fn */
573
574 VECTOR_SEGMENT
575
576 /*<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
577 *
578 * First-level syscall dispatch. The syscall vector maps r0 (the syscall number) into an
579 * index into the "scTable" (below), and then branches to one of these routines. The PPC
580 * syscalls come in several varieties, as follows:
581 *
582 * 1. If (syscall & 0xFFFFF000) == 0x00007000, then it is a PPC Fast Trap or UFT.
583 * The UFTs are dispatched here, the Fast Traps are dispatched in hw_exceptions.s.
584 *
585 * 2. If (syscall & 0xFFFFF000) == 0x00006000, then it is a PPC-only trap.
586 * One of these (0x6004) is a UFT, but most are dispatched in hw_exceptions.s. These
587 * are mostly Blue Box or VMM (Virtual Machine) calls.
588 *
589 * 3. If (syscall & 0xFFFFFFF0) == 0xFFFFFFF0, then it is also a UFT and is dispatched here.
590 *
591 * 4. If (syscall & 0xFFFFF000) == 0x80000000, then it is a "firmware" call and is dispatched in
592 * Firmware.s, though the special "Cut Trace" trap (0x80000000) is handled here as an ultra
593 * fast trap.
594 *
595 * 5. If (syscall & 0xFFFFF000) == 0xFFFFF000, and it is not one of the above, then it is a Mach
596 * syscall, which are dispatched in hw_exceptions.s via "mach_trap_table".
597 *
598 * 6. If (syscall & 0xFFFFF000) == 0x00000000, then it is a BSD syscall, which are dispatched
599 * by "unix_syscall" using the "sysent" table.
600 *
601 * What distinguishes the UFTs, aside from being ultra fast, is that they cannot rely on translation
602 * being on, and so cannot look at the activation or task control block, etc. We handle them right
603 * here, and return to the caller without turning interrupts or translation on. The UFTs are:
604 *
605 * 0xFFFFFFFF - BlueBox only - MKIsPreemptiveTask
606 * 0xFFFFFFFE - BlueBox only - MKIsPreemptiveTaskEnv
607 * 0x00007FF2 - User state only - thread info (32-bit mode)
608 * 0x00007FF3 - User state only - floating point / vector facility status
609 * 0x00007FF4 - Kernel only - loadMSR - not used on 64-bit machines
610 * 0x00006004 - vmm_dispatch (only some of which are UFTs)
611 *
612 * "scTable" is an array of 2-byte addresses, accessed using a 7-bit index derived from the syscall
613 * number as follows:
614 *
615 * 0x80 (A) - set if syscall number is 0x80000000
616 * 0x40 (B) - set if syscall number is 0x00006004
617 * 0x20 (C) - set if upper 29 bits of syscall number are 0xFFFFFFF8
618 * 0x10 (D) - set if upper 29 bits of syscall number are 0x00007FF0
619 * 0x0E (E) - low three bits of syscall number
620 *
621 * If you define another UFT, try to use a number in one of the currently decoded ranges, ie one marked
622 * "unassigned" below. The dispatch table and the UFT handlers must reside in the first 32KB of
623 * physical memory.
624 */
625
; scTable: 128 halfword offsets (from baseR) indexed by the 7-bit key built
; in .L_handlerC00; each entry is the dispatch target for one decoded
; syscall-number class.  See the big comment above for the index layout.
626 .align 8 ; start this table on a 256-byte boundary
627 scTable: ; ABCD E
628 .short uftNormalSyscall-baseR ; 0000 0 these syscalls are not in a reserved range
629 .short uftNormalSyscall-baseR ; 0000 1 these syscalls are not in a reserved range
630 .short uftNormalSyscall-baseR ; 0000 2 these syscalls are not in a reserved range
631 .short uftNormalSyscall-baseR ; 0000 3 these syscalls are not in a reserved range
632 .short uftNormalSyscall-baseR ; 0000 4 these syscalls are not in a reserved range
633 .short uftNormalSyscall-baseR ; 0000 5 these syscalls are not in a reserved range
634 .short uftNormalSyscall-baseR ; 0000 6 these syscalls are not in a reserved range
635 .short uftNormalSyscall-baseR ; 0000 7 these syscalls are not in a reserved range
636
637 .short uftNormalSyscall-baseR ; 0001 0 0x7FF0 is unassigned
638 .short uftNormalSyscall-baseR ; 0001 1 0x7FF1 is Set Thread Info Fast Trap (pass up)
639 .short uftThreadInfo-baseR ; 0001 2 0x7FF2 is Thread Info
640 .short uftFacilityStatus-baseR ; 0001 3 0x7FF3 is Facility Status
641 .short uftLoadMSR-baseR ; 0001 4 0x7FF4 is Load MSR
642 .short uftNormalSyscall-baseR ; 0001 5 0x7FF5 is the Null FastPath Trap (pass up)
643 .short uftNormalSyscall-baseR ; 0001 6 0x7FF6 is unassigned
644 .short uftNormalSyscall-baseR ; 0001 7 0x7FF7 is unassigned
645
646 .short uftNormalSyscall-baseR ; 0010 0 0xFFFFFFF0 is unassigned
647 .short uftNormalSyscall-baseR ; 0010 1 0xFFFFFFF1 is unassigned
648 .short uftNormalSyscall-baseR ; 0010 2 0xFFFFFFF2 is unassigned
649 .short uftNormalSyscall-baseR ; 0010 3 0xFFFFFFF3 is unassigned
650 .short uftNormalSyscall-baseR ; 0010 4 0xFFFFFFF4 is unassigned
651 .short uftNormalSyscall-baseR ; 0010 5 0xFFFFFFF5 is unassigned
652 .short uftIsPreemptiveTaskEnv-baseR ; 0010 6 0xFFFFFFFE is Blue Box uftIsPreemptiveTaskEnv
653 .short uftIsPreemptiveTask-baseR ; 0010 7 0xFFFFFFFF is Blue Box IsPreemptiveTask
654
655 .short WhoaBaby-baseR ; 0011 0 impossible combination
656 .short WhoaBaby-baseR ; 0011 1 impossible combination
657 .short WhoaBaby-baseR ; 0011 2 impossible combination
658 .short WhoaBaby-baseR ; 0011 3 impossible combination
659 .short WhoaBaby-baseR ; 0011 4 impossible combination
660 .short WhoaBaby-baseR ; 0011 5 impossible combination
661 .short WhoaBaby-baseR ; 0011 6 impossible combination
662 .short WhoaBaby-baseR ; 0011 7 impossible combination
663
664 .short WhoaBaby-baseR ; 0100 0 0x6000 is an impossible index (diagCall)
665 .short WhoaBaby-baseR ; 0100 1 0x6001 is an impossible index (vmm_get_version)
666 .short WhoaBaby-baseR ; 0100 2 0x6002 is an impossible index (vmm_get_features)
667 .short WhoaBaby-baseR ; 0100 3 0x6003 is an impossible index (vmm_init_context)
668 .short uftVMM-baseR ; 0100 4 0x6004 is vmm_dispatch (only some of which are UFTs)
669 .short WhoaBaby-baseR ; 0100 5 0x6005 is an impossible index (bb_enable_bluebox)
670 .short WhoaBaby-baseR ; 0100 6 0x6006 is an impossible index (bb_disable_bluebox)
671 .short WhoaBaby-baseR ; 0100 7 0x6007 is an impossible index (bb_settaskenv)
672
673 .short uftNormalSyscall-baseR ; 0101 0 these syscalls are not in a reserved range
674 .short uftNormalSyscall-baseR ; 0101 1 these syscalls are not in a reserved range
675 .short uftNormalSyscall-baseR ; 0101 2 these syscalls are not in a reserved range
676 .short uftNormalSyscall-baseR ; 0101 3 these syscalls are not in a reserved range
677 .short uftNormalSyscall-baseR ; 0101 4 these syscalls are not in a reserved range
678 .short uftNormalSyscall-baseR ; 0101 5 these syscalls are not in a reserved range
679 .short uftNormalSyscall-baseR ; 0101 6 these syscalls are not in a reserved range
680 .short uftNormalSyscall-baseR ; 0101 7 these syscalls are not in a reserved range
681
682 .short uftNormalSyscall-baseR ; 0110 0 these syscalls are not in a reserved range
683 .short uftNormalSyscall-baseR ; 0110 1 these syscalls are not in a reserved range
684 .short uftNormalSyscall-baseR ; 0110 2 these syscalls are not in a reserved range
685 .short uftNormalSyscall-baseR ; 0110 3 these syscalls are not in a reserved range
686 .short uftNormalSyscall-baseR ; 0110 4 these syscalls are not in a reserved range
687 .short uftNormalSyscall-baseR ; 0110 5 these syscalls are not in a reserved range
688 .short uftNormalSyscall-baseR ; 0110 6 these syscalls are not in a reserved range
689 .short uftNormalSyscall-baseR ; 0110 7 these syscalls are not in a reserved range
690
691 .short uftNormalSyscall-baseR ; 0111 0 these syscalls are not in a reserved range
692 .short uftNormalSyscall-baseR ; 0111 1 these syscalls are not in a reserved range
693 .short uftNormalSyscall-baseR ; 0111 2 these syscalls are not in a reserved range
694 .short uftNormalSyscall-baseR ; 0111 3 these syscalls are not in a reserved range
695 .short uftNormalSyscall-baseR ; 0111 4 these syscalls are not in a reserved range
696 .short uftNormalSyscall-baseR ; 0111 5 these syscalls are not in a reserved range
697 .short uftNormalSyscall-baseR ; 0111 6 these syscalls are not in a reserved range
698 .short uftNormalSyscall-baseR ; 0111 7 these syscalls are not in a reserved range
699
700 .short uftCutTrace-baseR ; 1000 0 CutTrace
701 .short uftNormalSyscall-baseR ; 1000 1 these syscalls are not in a reserved range
702 .short uftNormalSyscall-baseR ; 1000 2 these syscalls are not in a reserved range
703 .short uftNormalSyscall-baseR ; 1000 3 these syscalls are not in a reserved range
704 .short uftNormalSyscall-baseR ; 1000 4 these syscalls are not in a reserved range
705 .short uftNormalSyscall-baseR ; 1000 5 these syscalls are not in a reserved range
706 .short uftNormalSyscall-baseR ; 1000 6 these syscalls are not in a reserved range
707 .short uftNormalSyscall-baseR ; 1000 7 these syscalls are not in a reserved range
708
709 .short uftNormalSyscall-baseR ; 1001 0 these syscalls are not in a reserved range
710 .short uftNormalSyscall-baseR ; 1001 1 these syscalls are not in a reserved range
711 .short uftNormalSyscall-baseR ; 1001 2 these syscalls are not in a reserved range
712 .short uftNormalSyscall-baseR ; 1001 3 these syscalls are not in a reserved range
713 .short uftNormalSyscall-baseR ; 1001 4 these syscalls are not in a reserved range
714 .short uftNormalSyscall-baseR ; 1001 5 these syscalls are not in a reserved range
715 .short uftNormalSyscall-baseR ; 1001 6 these syscalls are not in a reserved range
716 .short uftNormalSyscall-baseR ; 1001 7 these syscalls are not in a reserved range
717
718 .short uftNormalSyscall-baseR ; 1010 0 these syscalls are not in a reserved range
719 .short uftNormalSyscall-baseR ; 1010 1 these syscalls are not in a reserved range
720 .short uftNormalSyscall-baseR ; 1010 2 these syscalls are not in a reserved range
721 .short uftNormalSyscall-baseR ; 1010 3 these syscalls are not in a reserved range
722 .short uftNormalSyscall-baseR ; 1010 4 these syscalls are not in a reserved range
723 .short uftNormalSyscall-baseR ; 1010 5 these syscalls are not in a reserved range
724 .short uftNormalSyscall-baseR ; 1010 6 these syscalls are not in a reserved range
725 .short uftNormalSyscall-baseR ; 1010 7 these syscalls are not in a reserved range
726
727 .short uftNormalSyscall-baseR ; 1011 0 these syscalls are not in a reserved range
728 .short uftNormalSyscall-baseR ; 1011 1 these syscalls are not in a reserved range
729 .short uftNormalSyscall-baseR ; 1011 2 these syscalls are not in a reserved range
730 .short uftNormalSyscall-baseR ; 1011 3 these syscalls are not in a reserved range
731 .short uftNormalSyscall-baseR ; 1011 4 these syscalls are not in a reserved range
732 .short uftNormalSyscall-baseR ; 1011 5 these syscalls are not in a reserved range
733 .short uftNormalSyscall-baseR ; 1011 6 these syscalls are not in a reserved range
734 .short uftNormalSyscall-baseR ; 1011 7 these syscalls are not in a reserved range
735
736 .short uftNormalSyscall-baseR ; 1100 0 these syscalls are not in a reserved range
737 .short uftNormalSyscall-baseR ; 1100 1 these syscalls are not in a reserved range
738 .short uftNormalSyscall-baseR ; 1100 2 these syscalls are not in a reserved range
739 .short uftNormalSyscall-baseR ; 1100 3 these syscalls are not in a reserved range
740 .short uftNormalSyscall-baseR ; 1100 4 these syscalls are not in a reserved range
741 .short uftNormalSyscall-baseR ; 1100 5 these syscalls are not in a reserved range
742 .short uftNormalSyscall-baseR ; 1100 6 these syscalls are not in a reserved range
743 .short uftNormalSyscall-baseR ; 1100 7 these syscalls are not in a reserved range
744
745 .short uftNormalSyscall-baseR ; 1101 0 these syscalls are not in a reserved range
746 .short uftNormalSyscall-baseR ; 1101 1 these syscalls are not in a reserved range
747 .short uftNormalSyscall-baseR ; 1101 2 these syscalls are not in a reserved range
748 .short uftNormalSyscall-baseR ; 1101 3 these syscalls are not in a reserved range
749 .short uftNormalSyscall-baseR ; 1101 4 these syscalls are not in a reserved range
750 .short uftNormalSyscall-baseR ; 1101 5 these syscalls are not in a reserved range
751 .short uftNormalSyscall-baseR ; 1101 6 these syscalls are not in a reserved range
752 .short uftNormalSyscall-baseR ; 1101 7 these syscalls are not in a reserved range
753
754 .short uftNormalSyscall-baseR ; 1110 0 these syscalls are not in a reserved range
755 .short uftNormalSyscall-baseR ; 1110 1 these syscalls are not in a reserved range
756 .short uftNormalSyscall-baseR ; 1110 2 these syscalls are not in a reserved range
757 .short uftNormalSyscall-baseR ; 1110 3 these syscalls are not in a reserved range
758 .short uftNormalSyscall-baseR ; 1110 4 these syscalls are not in a reserved range
759 .short uftNormalSyscall-baseR ; 1110 5 these syscalls are not in a reserved range
760 .short uftNormalSyscall-baseR ; 1110 6 these syscalls are not in a reserved range
761 .short uftNormalSyscall-baseR ; 1110 7 these syscalls are not in a reserved range
762
763 .short uftNormalSyscall-baseR ; 1111 0 these syscalls are not in a reserved range
764 .short uftNormalSyscall-baseR ; 1111 1 these syscalls are not in a reserved range
765 .short uftNormalSyscall-baseR ; 1111 2 these syscalls are not in a reserved range
766 .short uftNormalSyscall-baseR ; 1111 3 these syscalls are not in a reserved range
767 .short uftNormalSyscall-baseR ; 1111 4 these syscalls are not in a reserved range
768 .short uftNormalSyscall-baseR ; 1111 5 these syscalls are not in a reserved range
769 .short uftNormalSyscall-baseR ; 1111 6 these syscalls are not in a reserved range
770 .short uftNormalSyscall-baseR ; 1111 7 these syscalls are not in a reserved range
771
772 .align 2 ; prepare for code
773
774
/* Ultra Fast Trap (UFT) Handlers:
 *
 * We get here directly from the hw syscall vector via the "scTable" vector (above),
 * with interrupts and VM off, in 64-bit mode if supported, and with all registers live
 * except the following:
 *
 * r11 = per_proc ptr (ie, sprg0)
 * r13 = holds caller's ctr register
 * sprg2 = holds caller's r13
 * sprg3 = holds caller's r11
 */

; Handle "vmm_dispatch" (0x6004), of which only some selectors are UFTs.
; Only eligible when the caller is running a VM in FAM mode (runningVM,
; FamVMena and FamVMmode all set in spcFlags) and r3 holds a selector in
; the [kvmmResumeGuest, kvmmSetGuestRegister] UFT range; everything else
; is passed up to the normal syscall path.

789 uftVMM:
790 mtctr r13 ; restore callers ctr
791 lwz r11,spcFlags(r11) ; get the special flags word from per_proc
792 mfcr r13 ; save callers entire cr (we use all fields below)
793 rlwinm r11,r11,16,16,31 ; Extract spcFlags upper bits
794 andi. r11,r11,hi16(runningVM|FamVMena|FamVMmode)
795 cmpwi cr0,r11,hi16(runningVM|FamVMena|FamVMmode) ; Test in VM FAM
796 bne-- uftNormal80 ; not eligible for FAM UFTs
797 cmpwi cr5,r3,kvmmResumeGuest ; Compare r3 with kvmmResumeGuest
798 cmpwi cr2,r3,kvmmSetGuestRegister ; Compare r3 with kvmmSetGuestRegister
799 cror cr1_eq,cr5_lt,cr2_gt ; Set true if out of VMM Fast syscall range
800 bt-- cr1_eq,uftNormalFF ; Exit if out of range (the others are not UFTs)
801 b EXT(vmm_ufp) ; handle UFT range of vmm_dispatch syscall
802
803
; Handle blue box UFTs (syscalls -1 and -2).
; Returns the preemptive-task flag in the caller's cr0_eq; for -2
; (IsPreemptiveTaskEnv) it also returns the shadowed task environment
; word in r0.  Falls back to the normal syscall path if Blue Box is not
; trapping Mach syscalls.

806 uftIsPreemptiveTask:
807 uftIsPreemptiveTaskEnv:
808 mtctr r13 ; restore callers ctr
809 lwz r11,spcFlags(r11) ; get the special flags word from per_proc
810 mfcr r13,0x80 ; save callers cr0 so we can use it
811 andi. r11,r11,bbNoMachSC|bbPreemptive ; Clear what we do not need
812 cmplwi r11,bbNoMachSC ; See if we are trapping syscalls
813 blt-- uftNormal80 ; No...
814 cmpwi r0,-2 ; is this call IsPreemptiveTaskEnv?
815 rlwimi r13,r11,bbPreemptivebit-cr0_eq,cr0_eq,cr0_eq ; Copy preemptive task flag into user cr0_eq
816 mfsprg r11,0 ; Get the per proc once more
817 bne++ uftRestoreThenRFI ; do not load r0 if IsPreemptiveTask
818 lwz r0,ppbbTaskEnv(r11) ; Get the shadowed taskEnv (only difference)
819 b uftRestoreThenRFI ; restore modified cr0 and return
820
821
; Handle "Thread Info" UFT (0x7FF2): return the User Assist Word in r3.
; The 64-bit "ld" below is boot-patched to a nop on 32-bit processors,
; leaving the 32-bit "lwz" result in place.

824 .globl EXT(uft_uaw_nop_if_32bit)
825 uftThreadInfo:
826 lwz r3,UAW+4(r11) ; get user assist word, assuming a 32-bit processor
827 LEXT(uft_uaw_nop_if_32bit)
828 ld r3,UAW(r11) ; get the whole doubleword if 64-bit (patched to nop if 32-bit)
829 mtctr r13 ; restore callers ctr
830 b uftRFI ; done
831
832
; Handle "Facility Status" UFT (0x7FF3): return per_proc spcFlags in r3.

835 uftFacilityStatus:
836 lwz r3,spcFlags(r11) ; get "special flags" word from per_proc
837 mtctr r13 ; restore callers ctr
838 b uftRFI ; done
839
840
; Handle "Load MSR" UFT (0x7FF4). This is not used on 64-bit processors, though it would work.
; Kernel-only: callers in problem state (MSR[PR] set) are bounced to the
; normal syscall path.  Sets SRR1 to the new MSR (r3) and then falls
; through to uftRestoreThenRFI below, so the rfi/rfid there applies it.

843 uftLoadMSR:
844 mfsrr1 r11 ; get callers MSR
845 mtctr r13 ; restore callers ctr
846 mfcr r13,0x80 ; save callers cr0 so we can test PR
847 rlwinm. r11,r11,0,MSR_PR_BIT,MSR_PR_BIT ; really in the kernel?
848 bne- uftNormal80 ; do not permit from user mode
849 mfsprg r11,0 ; restore per_proc
850 mtsrr1 r3 ; Set new MSR
851
852
853 ; Return to caller after UFT. When called:
854 ; r11 = per_proc ptr
855 ; r13 = callers cr0 in upper nibble (if uftRestoreThenRFI called)
856 ; sprg2 = callers r13
857 ; sprg3 = callers r11
858 
; Restores sprg2 to the feature flags, the callers r13/r11, and returns via
; rfi (32-bit) or rfid (64-bit). The branch at the uft_nop_if_32bit patch
; point selects the path; on 64-bit it also refreshes the UAW copy in sprg3.
859 uftRestoreThenRFI: ; WARNING: can drop down to here
860 mtcrf 0x80,r13 ; restore callers cr0
861 uftRFI:
862 .globl EXT(uft_nop_if_32bit)
863 LEXT(uft_nop_if_32bit)
864 b uftX64 ; patched to NOP if 32-bit processor
865 
866 uftX32: lwz r11,pfAvailable(r11) ; Get the feature flags
867 mfsprg r13,2 ; Restore R13
868 mtsprg 2,r11 ; Set the feature flags
869 mfsprg r11,3 ; Restore R11
870 rfi ; Back to our guy...
871 
872 uftX64: mtspr hsprg0,r14 ; Save a register in a Hypervisor SPRG
873 ld r14,UAW(r11) ; Get the User Assist DoubleWord
874 lwz r11,pfAvailable(r11) ; Get the feature flags
875 mfsprg r13,2 ; Restore R13
876 mtsprg 2,r11 ; Set the feature flags
877 mfsprg r11,3 ; Restore R11
878 mtsprg 3,r14 ; Set the UAW in sprg3
879 mfspr r14,hsprg0 ; Restore R14
880 rfid ; Back to our guy...
881
882 ;
883 ; Quickly cut a trace table entry for the CutTrace firmware call.
884 ;
885 ; All registers except R11 and R13 are unchanged.
886 ;
887 ; Note that this code cuts a trace table entry for the CutTrace call only.
888 ; An identical entry is made during normal interrupt processing. Any entry
889 ; format entry changes made must be done in both places.
890 ;
891 
892 .align 5
893 
; Entry (UFT convention): r11 = per_proc ptr, r13 = callers CTR,
; sprg2/sprg3 = callers r13/r11. The first instruction is patched to a nop
; on 32-bit processors, so they fall into the 32-bit path below; 64-bit
; processors branch to uftct64.
894 .globl EXT(uft_cuttrace)
895 LEXT(uft_cuttrace)
896 uftCutTrace:
897 b uftct64 ; patched to NOP if 32-bit processor
898 
; Stash work registers in the per_proc temp slots, then check that either we
; are in supervisor state or the enaUsrFCall diagnostic bit permits user use.
899 stw r20,tempr0(r11) ; Save some work registers
900 lwz r20,dgFlags(0) ; Get the flags
901 stw r21,tempr1(r11) ; Save some work registers
902 mfsrr1 r21 ; Get the SRR1
903 rlwinm r20,r20,MSR_PR_BIT-enaUsrFCallb,MASK(MSR_PR) ; Shift the validity bit over to pr bit spot
904 stw r25,tempr2(r11) ; Save some work registers
905 orc r20,r20,r21 ; Get ~PR | FC
906 mfcr r25 ; Save the CR
907 stw r22,tempr3(r11) ; Save some work registers
908 lhz r22,PP_CPU_NUMBER(r11) ; Get the logical processor number
909 andi. r20,r20,MASK(MSR_PR) ; Set cr0_eq if we are in problem state and the validity bit is not set
910 stw r23,tempr4(r11) ; Save some work registers
911 lwz r23,traceMask(0) ; Get the trace mask
912 stw r24,tempr5(r11) ; Save some work registers
913 beq- ctbail32 ; Can not issue from user...
914 
915 
; Check both the per-cpu bit and the syscall-type bit in traceMask.
916 addi r24,r22,16 ; Get shift to move cpu mask to syscall mask
917 rlwnm r24,r23,r24,12,12 ; Shift cpu mask bit to rupt type mask
918 and. r24,r24,r23 ; See if both are on
919 
920 ;
921 ; We select a trace entry using a compare and swap on the next entry field.
922 ; Since we do not lock the actual trace buffer, there is a potential that
923 ; another processor could wrap and trash our entry. Who cares?
924 ;
925 
926 li r23,trcWork ; Get the trace work area address
927 lwz r21,traceStart(0) ; Get the start of trace table
928 lwz r22,traceEnd(0) ; Get end of trace table
929 
930 beq-- ctdisa32 ; Leave because tracing is disabled...
931 
; lwarx/stwcx. loop: claim the next trace slot, wrapping at traceEnd.
932 ctgte32: lwarx r20,0,r23 ; Get and reserve the next slot to allocate
933 addi r24,r20,LTR_size ; Point to the next trace entry
934 cmplw r24,r22 ; Do we need to wrap the trace table?
935 bne+ ctgte32s ; No wrap, we got us a trace entry...
936 
937 mr r24,r21 ; Wrap back to start
938 
939 ctgte32s: stwcx. r24,0,r23 ; Try to update the current pointer
940 bne- ctgte32 ; Collision, try again...
941 
942 #if ESPDEBUG
943 dcbf 0,r23 ; Force to memory
944 sync
945 #endif
946 
947 dcbz 0,r20 ; Clear and allocate first trace line
948 li r24,32 ; Offset to next line
949 
; Read a consistent 64-bit timebase (retry if the upper half ticked).
950 ctgte32tb: mftbu r21 ; Get the upper time now
951 mftb r22 ; Get the lower time now
952 mftbu r23 ; Get upper again
953 cmplw r21,r23 ; Has it ticked?
954 bne- ctgte32tb ; Yes, start again...
955 
956 dcbz r24,r20 ; Clean second line
957 
958 ;
959 ; Let us cut that trace entry now.
960 ;
961 ; Note that this code cuts a trace table entry for the CutTrace call only.
962 ; An identical entry is made during normal interrupt processing. Any entry
963 ; format entry changes made must be done in both places.
964 ;
965 
966 lhz r24,PP_CPU_NUMBER(r11) ; Get the logical processor number
967 li r23,T_SYSTEM_CALL ; Get the system call id
968 mtctr r13 ; Restore the callers CTR
969 sth r24,LTR_cpu(r20) ; Save processor number
970 li r24,64 ; Offset to third line
971 sth r23,LTR_excpt(r20) ; Set the exception code
972 dcbz r24,r20 ; Clean 3rd line
973 mfspr r23,dsisr ; Get the DSISR
974 stw r21,LTR_timeHi(r20) ; Save top of time stamp
975 li r24,96 ; Offset to fourth line
976 mflr r21 ; Get the LR
977 dcbz r24,r20 ; Clean 4th line
978 stw r22,LTR_timeLo(r20) ; Save bottom of time stamp
979 mfsrr0 r22 ; Get SRR0
980 stw r25,LTR_cr(r20) ; Save CR
981 mfsrr1 r24 ; Get the SRR1
982 stw r23,LTR_dsisr(r20) ; Save DSISR
983 stw r22,LTR_srr0+4(r20) ; Save SRR0
984 mfdar r23 ; Get DAR
985 stw r24,LTR_srr1+4(r20) ; Save SRR1
986 stw r23,LTR_dar+4(r20) ; Save DAR
987 stw r21,LTR_lr+4(r20) ; Save LR
988 
; r13 still holds the callers CTR value here (CTR itself was restored above).
989 stw r13,LTR_ctr+4(r20) ; Save CTR
990 stw r0,LTR_r0+4(r20) ; Save register
991 stw r1,LTR_r1+4(r20) ; Save register
992 stw r2,LTR_r2+4(r20) ; Save register
993 stw r3,LTR_r3+4(r20) ; Save register
994 stw r4,LTR_r4+4(r20) ; Save register
995 stw r5,LTR_r5+4(r20) ; Save register
996 stw r6,LTR_r6+4(r20) ; Save register
997 
998 #if 0
999 lwz r21,FPUowner(r11) ; (TEST/DEBUG) Get the current floating point owner
1000 stw r21,LTR_rsvd0(r20) ; (TEST/DEBUG) Record the owner
1001 #endif
1002 
1003 #if ESPDEBUG
1004 addi r21,r20,32 ; Second line
1005 addi r22,r20,64 ; Third line
1006 dcbst 0,r20 ; Force to memory
1007 dcbst 0,r21 ; Force to memory
1008 addi r21,r22,32 ; Fourth line
1009 dcbst 0,r22 ; Force to memory
1010 dcbst 0,r21 ; Force to memory
1011 sync ; Make sure it all goes
1012 #endif
1013 
; Tracing disabled: restore work registers and exit through the UFT return.
1014 ctdisa32: mtcrf 0x80,r25 ; Restore the used condition register field
1015 lwz r20,tempr0(r11) ; Restore work register
1016 lwz r21,tempr1(r11) ; Restore work register
1017 lwz r25,tempr2(r11) ; Restore work register
1018 mtctr r13 ; Restore the callers CTR
1019 lwz r22,tempr3(r11) ; Restore work register
1020 lwz r23,tempr4(r11) ; Restore work register
1021 lwz r24,tempr5(r11) ; Restore work register
1022 b uftX32 ; Go restore the rest and go...
1023 
; Not permitted from user mode: restore and route as a normal system call.
1024 ctbail32: mtcrf 0x80,r25 ; Restore the used condition register field
1025 lwz r20,tempr0(r11) ; Restore work register
1026 lwz r21,tempr1(r11) ; Restore work register
1027 lwz r25,tempr2(r11) ; Restore work register
1028 mtctr r13 ; Restore the callers CTR
1029 lwz r22,tempr3(r11) ; Restore work register
1030 lwz r23,tempr4(r11) ; Restore work register
1031 b uftNormalSyscall ; Go pass it on along...
1032
1033 ;
1034 ; This is the 64-bit version.
1035 ;
; Same logic as the 32-bit CutTrace path above, using 64-bit stores (std),
; a single dcbz128 to clear the 128-byte trace line, and one mftb for the
; full timebase. Keep the entry layout in sync with the 32-bit path and with
; the normal interrupt trace code.
1036 
1037 uftct64: std r20,tempr0(r11) ; Save some work registers
1038 lwz r20,dgFlags(0) ; Get the flags
1039 std r21,tempr1(r11) ; Save some work registers
1040 mfsrr1 r21 ; Get the SRR1
1041 rlwinm r20,r20,MSR_PR_BIT-enaUsrFCallb,MASK(MSR_PR) ; Shift the validity bit over to pr bit spot
1042 std r25,tempr2(r11) ; Save some work registers
1043 orc r20,r20,r21 ; Get ~PR | FC
1044 mfcr r25 ; Save the CR
1045 std r22,tempr3(r11) ; Save some work registers
1046 lhz r22,PP_CPU_NUMBER(r11) ; Get the logical processor number
1047 andi. r20,r20,MASK(MSR_PR) ; Set cr0_eq when we are in problem state and the validity bit is not set
1048 std r23,tempr4(r11) ; Save some work registers
1049 lwz r23,traceMask(0) ; Get the trace mask
1050 std r24,tempr5(r11) ; Save some work registers
1051 beq-- ctbail64 ; Can not issue from user...
1052 
1053 addi r24,r22,16 ; Get shift to move cpu mask to syscall mask
1054 rlwnm r24,r23,r24,12,12 ; Shift cpu mask bit to rupt type mask
1055 and. r24,r24,r23 ; See if both are on
1056 
1057 ;
1058 ; We select a trace entry using a compare and swap on the next entry field.
1059 ; Since we do not lock the actual trace buffer, there is a potential that
1060 ; another processor could wrap and trash our entry. Who cares?
1061 ;
1062 
1063 li r23,trcWork ; Get the trace work area address
1064 lwz r21,traceStart(0) ; Get the start of trace table
1065 lwz r22,traceEnd(0) ; Get end of trace table
1066 
1067 beq-- ctdisa64 ; Leave because tracing is disabled...
1068 
; lwarx/stwcx. loop: claim the next trace slot, wrapping at traceEnd.
1069 ctgte64: lwarx r20,0,r23 ; Get and reserve the next slot to allocate
1070 addi r24,r20,LTR_size ; Point to the next trace entry
1071 cmplw r24,r22 ; Do we need to wrap the trace table?
1072 bne++ ctgte64s ; No wrap, we got us a trace entry...
1073 
1074 mr r24,r21 ; Wrap back to start
1075 
1076 ctgte64s: stwcx. r24,0,r23 ; Try to update the current pointer
1077 bne-- ctgte64 ; Collision, try again...
1078 
1079 #if ESPDEBUG
1080 dcbf 0,r23 ; Force to memory
1081 sync
1082 #endif
1083 
1084 dcbz128 0,r20 ; Zap the trace entry
1085 
1086 mftb r21 ; Get the time
1087 
1088 ;
1089 ; Let us cut that trace entry now.
1090 ;
1091 ; Note that this code cuts a trace table entry for the CutTrace call only.
1092 ; An identical entry is made during normal interrupt processing. Any entry
1093 ; format entry changes made must be done in both places.
1094 ;
1095 
1096 lhz r24,PP_CPU_NUMBER(r11) ; Get the logical processor number
1097 li r23,T_SYSTEM_CALL ; Get the system call id
1098 sth r24,LTR_cpu(r20) ; Save processor number
1099 sth r23,LTR_excpt(r20) ; Set the exception code
1100 mfspr r23,dsisr ; Get the DSISR
1101 std r21,LTR_timeHi(r20) ; Save top of time stamp
1102 mflr r21 ; Get the LR
1103 mfsrr0 r22 ; Get SRR0
1104 stw r25,LTR_cr(r20) ; Save CR
1105 mfsrr1 r24 ; Get the SRR1
1106 stw r23,LTR_dsisr(r20) ; Save DSISR
1107 std r22,LTR_srr0(r20) ; Save SRR0
1108 mfdar r23 ; Get DAR
1109 std r24,LTR_srr1(r20) ; Save SRR1
1110 std r23,LTR_dar(r20) ; Save DAR
1111 std r21,LTR_lr(r20) ; Save LR
1112 
; r13 still holds the callers CTR value at this point.
1113 std r13,LTR_ctr(r20) ; Save CTR
1114 std r0,LTR_r0(r20) ; Save register
1115 std r1,LTR_r1(r20) ; Save register
1116 std r2,LTR_r2(r20) ; Save register
1117 std r3,LTR_r3(r20) ; Save register
1118 std r4,LTR_r4(r20) ; Save register
1119 std r5,LTR_r5(r20) ; Save register
1120 std r6,LTR_r6(r20) ; Save register
1121 
1122 #if 0
1123 lwz r21,FPUowner(r11) ; (TEST/DEBUG) Get the current floating point owner
1124 stw r21,LTR_rsvd0(r20) ; (TEST/DEBUG) Record the owner
1125 #endif
1126 
1127 #if ESPDEBUG
1128 dcbf 0,r20 ; Force to memory
1129 sync ; Make sure it all goes
1130 #endif
1131 
; Tracing disabled: restore work registers and exit through the UFT return.
1132 ctdisa64: mtcrf 0x80,r25 ; Restore the used condition register field
1133 ld r20,tempr0(r11) ; Restore work register
1134 ld r21,tempr1(r11) ; Restore work register
1135 ld r25,tempr2(r11) ; Restore work register
1136 mtctr r13 ; Restore the callers CTR
1137 ld r22,tempr3(r11) ; Restore work register
1138 ld r23,tempr4(r11) ; Restore work register
1139 ld r24,tempr5(r11) ; Restore work register
1140 b uftX64 ; Go restore the rest and go...
1141 
; Not permitted from user mode: restore and route as a normal system call,
; going directly to the 64-bit context-save entry.
1142 ctbail64: mtcrf 0x80,r25 ; Restore the used condition register field
1143 ld r20,tempr0(r11) ; Restore work register
1144 ld r21,tempr1(r11) ; Restore work register
1145 ld r25,tempr2(r11) ; Restore work register
1146 mtctr r13 ; Restore the callers CTR
1147 ld r22,tempr3(r11) ; Restore work register
1148 ld r23,tempr4(r11) ; Restore work register
1149 li r11,T_SYSTEM_CALL|T_FAM ; Set system code call
1150 b extEntry64 ; Go straight to the 64-bit code...
1151
1152
1153
1154 ; Handle a system call that is not a UFT and which thus goes upstairs.
1155 
; Three entry points differing only in how much of the callers state is still
; in r13: uftNormalFF has the whole CR, uftNormal80 just cr0 (in the upper
; field), uftNormalSyscall the callers CTR. All converge on setting the
; exception code and falling into .L_exception_entry below.
1156 uftNormalFF: ; here with entire cr in r13
1157 mtcr r13 ; restore all 8 fields
1158 b uftNormalSyscall1 ; Join common...
1159 
1160 uftNormal80: ; here with callers cr0 in r13
1161 mtcrf 0x80,r13 ; restore cr0
1162 b uftNormalSyscall1 ; Join common...
1163 
1164 uftNormalSyscall: ; r13 = callers ctr
1165 mtctr r13 ; restore ctr
1166 uftNormalSyscall1:
1167 li r11,T_SYSTEM_CALL|T_FAM ; this is a system call (and fall through)
1168
1169
1170 /*<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>*/
1171 /*
1172 * .L_exception_entry(type)
1173 *
1174 * Come here via branch directly from the vector, or falling down from above, with the following
1175 * set up:
1176 *
1177 * ENTRY: interrupts off, VM off, in 64-bit mode if supported
1178 * Caller's r13 saved in sprg2.
1179 * Caller's r11 saved in sprg3.
1180 * Exception code (ie, T_SYSTEM_CALL etc) in r11.
1181 * All other registers are live.
1182 *
1183 */
1184
1185 .L_exception_entry: ; WARNING: can fall through from UFT handler
1186 
1187 /*
1188 *
1189 * Here we will save off a mess of registers, the special ones and R0-R12. We use the DCBZ
1190 * instruction to clear and allocate a line in the cache. This way we won't take any cache
1191 * misses, so these stores won't take all that long. Except the first line that is because
1192 * we can't do a DCBZ if the L1 D-cache is off. The rest we will skip if they are
1193 * off also.
1194 *
1195 * Note that if we are attempting to sleep (as opposed to nap or doze) all interruptions
1196 * are ignored.
1197 */
1198 
1199 
; extPatch32: the leading branch is patched to a nop on 32-bit processors so
; they fall into the 32-bit save path; 64-bit processors take extEntry64.
; If HID0 shows we are trying to sleep, undo the two saves and rfi straight
; back into the sleep code, ignoring the interruption.
1200 .globl EXT(extPatch32)
1201 
1202 
1203 LEXT(extPatch32)
1204 b extEntry64 ; Go do 64-bit (patched to a nop if 32-bit)
1205 mfsprg r13,0 ; Load per_proc
1206 lwz r13,next_savearea+4(r13) ; Get the exception save area
1207 stw r0,saver0+4(r13) ; Save register 0
1208 stw r1,saver1+4(r13) ; Save register 1
1209 
1210 mfspr r1,hid0 ; Get HID0
1211 mfcr r0 ; Save the whole CR
1212 
1213 mtcrf 0x20,r1 ; Get set to test for sleep
1214 cror doze,doze,nap ; Remember if we are napping
1215 bf sleep,notsleep ; Skip if we are not trying to sleep
1216 
1217 mtcrf 0x20,r0 ; Restore the CR
1218 lwz r0,saver0+4(r13) ; Restore R0
1219 lwz r1,saver1+4(r13) ; Restore R1
1220 mfsprg r13,0 ; Get the per_proc
1221 lwz r11,pfAvailable(r13) ; Get back the feature flags
1222 mfsprg r13,2 ; Restore R13
1223 mtsprg 2,r11 ; Set sprg2 to the features
1224 mfsprg r11,3 ; Restore R11
1225 rfi ; Jump back into sleep code...
1226 .long 0 ; Leave these here please...
1227 .long 0
1228 .long 0
1229 .long 0
1230 .long 0
1231 .long 0
1232 .long 0
1233 .long 0
1234
1235
1236 ;
1237 ; This is the 32-bit context saving stuff
1238 ;
; Saves all GPRs, the special registers, timebase and (when enabled) the
; performance-monitor registers into the savearea at r13, interleaving dcbz
; cache-line allocation with the stores. Also handles FAM interception and
; nap/doze wake-up accounting along the way. r2 = per_proc throughout.
1239 
1240 .align 5
1241 
1242 notsleep: stw r2,saver2+4(r13) ; Save this one
1243 bf doze,notspdo ; Skip the next if we are not napping/dozing...
1244 rlwinm r2,r1,0,nap+1,doze-1 ; Clear any possible nap and doze bits
1245 mtspr hid0,r2 ; Clear the nap/doze bits
1246 
1247 notspdo:
1248 la r1,saver4(r13) ; Point to the next line in case we need it
1249 crmove wasNapping,doze ; Remember if we were napping
1250 mfsprg r2,0 ; Get the per_proc area
1251 dcbz 0,r1 ; allocate r4-r7 32-byte line in cache
1252 
1253 ;
1254 ; Remember, we are setting up CR6 with feature flags
1255 ;
1256 andi. r1,r11,T_FAM ; Check FAM bit
1257 
; FAM (Fast Assist Mode) intercept check: if the exception came from user
; state in an active FAM VM context and its bit is in FAMintercept, hand it
; to vmm_fam_exc instead of the normal path.
1258 stw r3,saver3+4(r13) ; Save this one
1259 stw r4,saver4+4(r13) ; Save this one
1260 andc r11,r11,r1 ; Clear FAM bit
1261 beq+ noFAM ; Is it FAM intercept
1262 mfsrr1 r3 ; Load srr1
1263 rlwinm. r3,r3,0,MSR_PR_BIT,MSR_PR_BIT ; Are we trapping from supervisor state?
1264 beq+ noFAM ; From supervisor state
1265 lwz r1,spcFlags(r2) ; Load spcFlags
1266 rlwinm r1,r1,1+FamVMmodebit,30,31 ; Extract FamVMenabit and FamVMmodebit
1267 cmpwi cr0,r1,2 ; Check FamVMena set without FamVMmode
1268 bne+ noFAM ; Can this context be FAM intercept
1269 lwz r4,FAMintercept(r2) ; Load exceptions mask to intercept
1270 srwi r1,r11,2 ; Divide r11 by 4
1271 lis r3,0x8000 ; Set r3 to 0x80000000
1272 srw r1,r3,r1 ; Set bit for current exception
1273 and. r1,r1,r4 ; And current exception with the intercept mask
1274 beq+ noFAM ; Is it FAM intercept
1275 b EXT(vmm_fam_exc)
1276 noFAM:
1277 lwz r1,pfAvailable(r2) ; Get the CPU features flags
1278 la r3,saver8(r13) ; Point to line with r8-r11
1279 mtcrf 0xE2,r1 ; Put the features flags (that we care about) in the CR
1280 dcbz 0,r3 ; allocate r8-r11 32-byte line in cache
1281 la r3,saver12(r13) ; point to r12-r15 line
1282 lis r4,hi16(MASK(MSR_VEC)|MASK(MSR_FP)|MASK(MSR_ME)) ; Set up the MSR we will use throughout. Note that ME come on here if MCK
1283 stw r6,saver6+4(r13) ; Save this one
1284 ori r4,r4,lo16(MASK(MSR_VEC)|MASK(MSR_FP)|MASK(MSR_ME)) ; Rest of MSR
1285 stw r8,saver8+4(r13) ; Save this one
1286 crmove featAltivec,pfAltivecb ; Set the Altivec flag
1287 mtmsr r4 ; Set MSR
1288 isync
1289 mfsrr0 r6 ; Get the interruption SRR0
1290 la r8,savesrr0(r13) ; point to line with SRR0, SRR1, CR, XER, and LR
1291 dcbz 0,r3 ; allocate r12-r15 32-byte line in cache
1292 la r3,saver16(r13) ; point to next line
1293 dcbz 0,r8 ; allocate 32-byte line with SRR0, SRR1, CR, XER, and LR
1294 stw r7,saver7+4(r13) ; Save this one
1295 mfsrr1 r7 ; Get the interrupt SRR1
1296 stw r6,savesrr0+4(r13) ; Save the SRR0
1297 stw r5,saver5+4(r13) ; Save this one
1298 mfsprg r6,2 ; Get interrupt time R13
1299 mtsprg 2,r1 ; Set the feature flags
1300 mfsprg r8,3 ; Get rupt time R11
1301 stw r7,savesrr1+4(r13) ; Save SRR1
1302 stw r8,saver11+4(r13) ; Save rupt time R11
1303 stw r6,saver13+4(r13) ; Save rupt R13
1304 dcbz 0,r3 ; allocate 32-byte line with r16-r19
1305 la r3,saver20(r13) ; point to next line
1306 
; Read a consistent 64-bit timebase (retry if the upper half ticked).
1307 getTB: mftbu r6 ; Get the upper timebase
1308 mftb r7 ; Get the lower timebase
1309 mftbu r8 ; Get the upper one again
1310 cmplw r6,r8 ; Did the top tick?
1311 bne- getTB ; Yeah, need to get it again...
1312 
1313 stw r8,ruptStamp(r2) ; Save the top of time stamp
1314 stw r8,SAVtime(r13) ; Save the top of time stamp
1315 stw r7,ruptStamp+4(r2) ; Save the bottom of time stamp
1316 stw r7,SAVtime+4(r13) ; Save the bottom of time stamp
1317 
1318 dcbz 0,r3 ; allocate 32-byte line with r20-r23
1319 stw r9,saver9+4(r13) ; Save this one
1320 
1321 stw r10,saver10+4(r13) ; Save this one
1322 mflr r4 ; Get the LR
1323 mfxer r10 ; Get the XER
1324 
1325 bf+ wasNapping,notNapping ; Skip if not waking up from nap...
1326 
; Woke up from nap: add the napped interval (now - napStamp, 64-bit) into
; napTotal and redirect the return address to machine_idle_ret.
1327 lwz r6,napStamp+4(r2) ; Pick up low order nap stamp
1328 lis r3,hi16(EXT(machine_idle_ret)) ; Get high part of nap/doze return
1329 lwz r5,napStamp(r2) ; and high order
1330 subfc r7,r6,r7 ; Subtract low stamp from now
1331 lwz r6,napTotal+4(r2) ; Pick up low total
1332 subfe r5,r5,r8 ; Subtract high stamp and borrow from now
1333 lwz r8,napTotal(r2) ; Pick up the high total
1334 addc r6,r6,r7 ; Add low to total
1335 ori r3,r3,lo16(EXT(machine_idle_ret)) ; Get low part of nap/doze return
1336 adde r8,r8,r5 ; Add high and carry to total
1337 stw r6,napTotal+4(r2) ; Save the low total
1338 stw r8,napTotal(r2) ; Save the high total
1339 stw r3,savesrr0+4(r13) ; Modify to return to nap/doze exit
1340 
1341 rlwinm. r3,r1,0,pfSlowNapb,pfSlowNapb ; Should HID1 be restored?
1342 beq notInSlowNap
1343 
1344 lwz r3,pfHID1(r2) ; Get saved HID1 value
1345 mtspr hid1,r3 ; Restore HID1
1346 
1347 notInSlowNap:
1348 rlwinm. r3,r1,0,pfNoL2PFNapb,pfNoL2PFNapb ; Should MSSCR0 be restored?
1349 beq notNapping
1350 
1351 lwz r3,pfMSSCR0(r2) ; Get saved MSSCR0 value
1352 mtspr msscr0,r3 ; Restore MSSCR0
1353 sync
1354 isync
1355 
1356 notNapping: stw r12,saver12+4(r13) ; Save this one
1357 
1358 stw r14,saver14+4(r13) ; Save this one
1359 stw r15,saver15+4(r13) ; Save this one
1360 la r14,saver24(r13) ; Point to the next block to save into
1361 mfctr r6 ; Get the CTR
1362 stw r16,saver16+4(r13) ; Save this one
1363 la r15,savectr(r13) ; point to line with CTR, DAR, DSISR, Exception code, and VRSAVE
1364 stw r4,savelr+4(r13) ; Save rupt LR
1365 
1366 dcbz 0,r14 ; allocate 32-byte line with r24-r27
1367 la r16,saver28(r13) ; point to line with r28-r31
1368 dcbz 0,r15 ; allocate line with CTR, DAR, DSISR, Exception code, and VRSAVE
1369 stw r17,saver17+4(r13) ; Save this one
1370 stw r18,saver18+4(r13) ; Save this one
1371 stw r6,savectr+4(r13) ; Save rupt CTR
1372 stw r0,savecr(r13) ; Save rupt CR
1373 stw r19,saver19+4(r13) ; Save this one
1374 mfdar r6 ; Get the rupt DAR
1375 stw r20,saver20+4(r13) ; Save this one
1376 dcbz 0,r16 ; allocate 32-byte line with r28-r31
1377 
1378 stw r21,saver21+4(r13) ; Save this one
1379 lwz r21,spcFlags(r2) ; Get the special flags from per_proc
1380 stw r10,savexer+4(r13) ; Save the rupt XER
1381 stw r30,saver30+4(r13) ; Save this one
1382 lhz r30,pfrptdProc(r2) ; Get the reported processor type
1383 stw r31,saver31+4(r13) ; Save this one
1384 stw r22,saver22+4(r13) ; Save this one
1385 stw r23,saver23+4(r13) ; Save this one
1386 stw r24,saver24+4(r13) ; Save this one
1387 stw r25,saver25+4(r13) ; Save this one
1388 mfdsisr r7 ; Get the rupt DSISR
1389 stw r26,saver26+4(r13) ; Save this one
1390 stw r27,saver27+4(r13) ; Save this one
1391 andis. r21,r21,hi16(perfMonitor) ; Is the performance monitor enabled?
1392 stw r28,saver28+4(r13) ; Save this one
1393 cmpwi cr1, r30,CPU_SUBTYPE_POWERPC_750 ; G3?
1394 la r27,savevscr(r13) ; point to 32-byte line with VSCR and FPSCR
1395 cmpwi cr2,r30,CPU_SUBTYPE_POWERPC_7400 ; This guy?
1396 stw r29,saver29+4(r13) ; Save R29
1397 stw r6,savedar+4(r13) ; Save the rupt DAR
1398 li r10,savepmc ; Point to pmc savearea
1399 
1400 beq+ noPerfMonSave32 ; No perfmon on here...
1401 
; Performance monitor save: per-model (750 / 7400 / 7450) PMC and MMCR
; registers are captured into the savearea and then cleared.
1402 dcbz r10,r13 ; Clear first part of pmc area
1403 li r10,savepmc+0x20 ; Point to pmc savearea second part
1404 li r22,0 ; r22: zero
1405 dcbz r10,r13 ; Clear second part of pmc area
1406 
1407 beq cr1,perfMonSave32_750 ; This is a G3...
1408 
1409 beq cr2,perfMonSave32_7400 ; Regular olde G4...
1410 
1411 mfspr r24,pmc5 ; Here for a 7450
1412 mfspr r25,pmc6
1413 stw r24,savepmc+16(r13) ; Save PMC5
1414 stw r25,savepmc+20(r13) ; Save PMC6
1415 mtspr pmc5,r22 ; Leave PMC5 clear
1416 mtspr pmc6,r22 ; Leave PMC6 clear
1417 
1418 perfMonSave32_7400:
1419 mfspr r25,mmcr2
1420 stw r25,savemmcr2+4(r13) ; Save MMCR2
1421 mtspr mmcr2,r22 ; Leave MMCR2 clear
1422 
1423 perfMonSave32_750:
1424 mfspr r23,mmcr0
1425 mfspr r24,mmcr1
1426 stw r23,savemmcr0+4(r13) ; Save MMCR0
1427 stw r24,savemmcr1+4(r13) ; Save MMCR1
1428 mtspr mmcr0,r22 ; Leave MMCR0 clear
1429 mtspr mmcr1,r22 ; Leave MMCR1 clear
1430 mfspr r23,pmc1
1431 mfspr r24,pmc2
1432 mfspr r25,pmc3
1433 mfspr r26,pmc4
1434 stw r23,savepmc+0(r13) ; Save PMC1
1435 stw r24,savepmc+4(r13) ; Save PMC2
1436 stw r25,savepmc+8(r13) ; Save PMC3
1437 stw r26,savepmc+12(r13) ; Save PMC4
1438 mtspr pmc1,r22 ; Leave PMC1 clear
1439 mtspr pmc2,r22 ; Leave PMC2 clear
1440 mtspr pmc3,r22 ; Leave PMC3 clear
1441 mtspr pmc4,r22 ; Leave PMC4 clear
1442 
1443 noPerfMonSave32:
1444 dcbz 0,r27 ; allocate line with VSCR and FPSCR
1445 
1446 stw r7,savedsisr(r13) ; Save the rupt code DSISR
1447 stw r11,saveexception(r13) ; Save the exception code
1448
1449
1450 ;
1451 ; Everything is saved at this point, except for FPRs, and VMX registers.
1452 ; Time for us to get a new savearea and then trace interrupt if it is enabled.
1453 ;
1454 
; Decide whether to trace: CR5_EQ is computed from the exception-type bit and
; the per-cpu bit in traceMask; if either says no, skip straight to xcp32xit.
1455 lwz r25,traceMask(0) ; Get the trace mask
1456 li r0,SAVgeneral ; Get the savearea type value
1457 lhz r19,PP_CPU_NUMBER(r2) ; Get the logical processor number
1458 rlwinm r22,r11,30,0,31 ; Divide interrupt code by 4
1459 stb r0,SAVflags+2(r13) ; Mark valid context
1460 addi r22,r22,10 ; Adjust code so we shift into CR5
1461 li r23,trcWork ; Get the trace work area address
1462 rlwnm r7,r25,r22,22,22 ; Set CR5_EQ bit position to 0 if tracing allowed
1463 li r26,0x8 ; Get start of cpu mask
1464 srw r26,r26,r19 ; Get bit position of cpu number
1465 mtcrf 0x04,r7 ; Set CR5 to show trace or not
1466 and. r26,r26,r25 ; See if we trace this cpu
1467 crandc cr5_eq,cr5_eq,cr0_eq ; Turn off tracing if cpu is disabled
1468 ;
1469 ; At this point, we can take another exception and lose nothing.
1470 ;
1471 
1472 bne+ cr5,xcp32xit ; Skip all of this if no tracing here...
1473 
1474 ;
1475 ; We select a trace entry using a compare and swap on the next entry field.
1476 ; Since we do not lock the actual trace buffer, there is a potential that
1477 ; another processor could wrap and trash our entry. Who cares?
1478 ;
1479 
1480 lwz r25,traceStart(0) ; Get the start of trace table
1481 lwz r26,traceEnd(0) ; Get end of trace table
1482 
; lwarx/stwcx. loop: claim the next trace slot, wrapping at traceEnd.
1483 trcsel: lwarx r20,0,r23 ; Get and reserve the next slot to allocate
1484 
1485 addi r22,r20,LTR_size ; Point to the next trace entry
1486 cmplw r22,r26 ; Do we need to wrap the trace table?
1487 bne+ gotTrcEnt ; No wrap, we got us a trace entry...
1488 
1489 mr r22,r25 ; Wrap back to start
1490 
1491 gotTrcEnt: stwcx. r22,0,r23 ; Try to update the current pointer
1492 bne- trcsel ; Collision, try again...
1493 
1494 #if ESPDEBUG
1495 dcbf 0,r23 ; Force to memory
1496 sync
1497 #endif
1498 
1499 dcbz 0,r20 ; Clear and allocate first trace line
1500 
1501 ;
1502 ; Let us cut that trace entry now.
1503 ;
1504 ; Note that this code cuts a trace table entry for everything but the CutTrace call.
1505 ; An identical entry is made during normal CutTrace processing. Any entry
1506 ; format changes made must be done in both places.
1507 ;
1508 
1509 lwz r16,ruptStamp(r2) ; Get top of time base
1510 lwz r17,ruptStamp+4(r2) ; Get the bottom of time stamp
1511 
1512 li r14,32 ; Offset to second line
1513 
1514 lwz r0,saver0+4(r13) ; Get back interrupt time R0
1515 lwz r1,saver1+4(r13) ; Get back interrupt time R1
1516 lwz r8,savecr(r13) ; Get the CR value
1517 
1518 dcbz r14,r20 ; Zap the second line
1519 
1520 sth r19,LTR_cpu(r20) ; Stash the cpu number
1521 li r14,64 ; Offset to third line
1522 sth r11,LTR_excpt(r20) ; Save the exception type
1523 lwz r7,saver2+4(r13) ; Get back interrupt time R2
1524 lwz r3,saver3+4(r13) ; Restore this one
1525 
1526 dcbz r14,r20 ; Zap the third half
1527 
1528 mfdsisr r9 ; Get the DSISR
1529 li r14,96 ; Offset to fourth line
1530 stw r16,LTR_timeHi(r20) ; Set the upper part of TB
1531 stw r17,LTR_timeLo(r20) ; Set the lower part of TB
1532 lwz r10,savelr+4(r13) ; Get the LR
1533 mfsrr0 r17 ; Get SRR0 back, it is still good
1534 
1535 dcbz r14,r20 ; Zap the fourth half
1536 lwz r4,saver4+4(r13) ; Restore this one
1537 lwz r5,saver5+4(r13) ; Restore this one
1538 mfsrr1 r18 ; SRR1 is still good in here
1539 
1540 stw r8,LTR_cr(r20) ; Save the CR
1541 lwz r6,saver6+4(r13) ; Get R6
1542 mfdar r16 ; Get this back
1543 stw r9,LTR_dsisr(r20) ; Save the DSISR
1544 stw r17,LTR_srr0+4(r20) ; Save the SRR0
1545 
1546 stw r18,LTR_srr1+4(r20) ; Save the SRR1
1547 stw r16,LTR_dar+4(r20) ; Save the DAR
1548 mfctr r17 ; Get the CTR (still good in register)
1549 stw r13,LTR_save+4(r20) ; Save the savearea
1550 stw r10,LTR_lr+4(r20) ; Save the LR
1551 
1552 stw r17,LTR_ctr+4(r20) ; Save off the CTR
1553 stw r0,LTR_r0+4(r20) ; Save off register 0
1554 stw r1,LTR_r1+4(r20) ; Save off register 1
1555 stw r7,LTR_r2+4(r20) ; Save off register 2
1556 
1557 
1558 stw r3,LTR_r3+4(r20) ; Save off register 3
1559 stw r4,LTR_r4+4(r20) ; Save off register 4
1560 stw r5,LTR_r5+4(r20) ; Save off register 5
1561 stw r6,LTR_r6+4(r20) ; Save off register 6
1562 
1563 #if ESPDEBUG
1564 addi r17,r20,32 ; Second line
1565 addi r16,r20,64 ; Third line
1566 dcbst br0,r20 ; Force to memory
1567 dcbst br0,r17 ; Force to memory
1568 addi r17,r17,32 ; Fourth line
1569 dcbst br0,r16 ; Force to memory
1570 dcbst br0,r17 ; Force to memory
1571 
1572 sync ; Make sure it all goes
1573 #endif
; Tracing done (or skipped): allocate the savearea for the NEXT interrupt
; and join the common interrupt processing with the code back in r11.
1574 xcp32xit: mr r14,r11 ; Save the interrupt code across the call
1575 bl EXT(save_get_phys_32) ; Grab a savearea
1576 mfsprg r2,0 ; Get the per_proc info
1577 li r10,emfp0 ; Point to floating point save
1578 mr r11,r14 ; Get the exception code back
1579 dcbz r10,r2 ; Clear for speed
1580 stw r3,next_savearea+4(r2) ; Store the savearea for the next rupt
1581 
1582 b xcpCommon ; Go join the common interrupt processing...
1583
1584 ;
1585 ;
1586 ; This is the 64-bit context saving stuff
1587 ;
1588
1589 .align 5
1590
1591 extEntry64: mfsprg r13,0 ; Load per_proc
1592 ld r13,next_savearea(r13) ; Get the exception save area
1593 std r0,saver0(r13) ; Save register 0
1594 lis r0,hi16(MASK(MSR_VEC)|MASK(MSR_FP)|MASK(MSR_ME)) ; Set up the MSR we will use throughout. Note that ME come on here if MCK
1595 std r1,saver1(r13) ; Save register 1
1596 ori r1,r0,lo16(MASK(MSR_VEC)|MASK(MSR_FP)|MASK(MSR_ME)) ; Rest of MSR
1597 lis r0,0x0010 ; Get rupt code transform validity mask
1598 mtmsr r1 ; Set MSR
1599 isync
1600
1601 ori r0,r0,0x0200 ; Get rupt code transform validity mask
1602 std r2,saver2(r13) ; Save this one
1603 lis r1,0x00F0 ; Top half of xform XOR
1604 rlwinm r2,r11,29,27,31 ; Get high 5 bits of rupt code
1605 std r3,saver3(r13) ; Save this one
1606 slw r0,r0,r2 ; Move transform validity bit to bit 0
1607 std r4,saver4(r13) ; Save this one
1608 std r5,saver5(r13) ; Save this one
1609 ori r1,r1,0x04EC ; Bottom half of xform XOR
1610 mfxer r5 ; Save the XER because we are about to muck with it
1611 rlwinm r4,r11,1,27,28 ; Get bottom of interrupt code * 8
1612 lis r3,hi16(dozem|napm) ; Get the nap and doze bits
1613 srawi r0,r0,31 ; Get 0xFFFFFFFF of xform valid, 0 otherwise
1614 rlwnm r4,r1,r4,24,31 ; Extract the xform XOR
1615 li r1,saver16 ; Point to the next line
1616 and r4,r4,r0 ; Only keep transform if we are to use it
1617 li r2,lgKillResv ; Point to the killing field
1618 mfcr r0 ; Save the CR
1619 stwcx. r2,0,r2 ; Kill any pending reservation
1620 dcbz128 r1,r13 ; Blow away the line
1621 sldi r3,r3,32 ; Position it
1622 mfspr r1,hid0 ; Get HID0
1623 andc r3,r1,r3 ; Clear nap and doze
1624 xor r11,r11,r4 ; Transform 970 rupt code to standard keeping FAM bit
1625 cmpld r3,r1 ; See if nap and/or doze was on
1626 std r6,saver6(r13) ; Save this one
1627 mfsprg r2,0 ; Get the per_proc area
1628 la r6,savesrr0(r13) ; point to line with SRR0, SRR1, CR, XER, and LR
1629 beq++ eE64NoNap ; No nap here, skip all this...
1630
1631 sync ; Make sure we are clean
1632 mtspr hid0,r3 ; Set the updated hid0
1633 mfspr r1,hid0 ; Yes, this is silly, keep it here
1634 mfspr r1,hid0 ; Yes, this is a duplicate, keep it here
1635 mfspr r1,hid0 ; Yes, this is a duplicate, keep it here
1636 mfspr r1,hid0 ; Yes, this is a duplicate, keep it here
1637 mfspr r1,hid0 ; Yes, this is a duplicate, keep it here
1638 mfspr r1,hid0 ; Yes, this is a duplicate, keep it here
1639
1640 eE64NoNap: crnot wasNapping,cr0_eq ; Remember if we were napping
1641 andi. r1,r11,T_FAM ; Check FAM bit
1642 beq++ eEnoFAM ; Is it FAM intercept
1643 mfsrr1 r3 ; Load srr1
1644 andc r11,r11,r1 ; Clear FAM bit
1645 rlwinm. r3,r3,0,MSR_PR_BIT,MSR_PR_BIT ; Are we trapping from supervisor state?
1646 beq++ eEnoFAM ; From supervisor state
1647 lwz r1,spcFlags(r2) ; Load spcFlags
1648 rlwinm r1,r1,1+FamVMmodebit,30,31 ; Extract FamVMenabit and FamVMmodebit
1649 cmpwi cr0,r1,2 ; Check FamVMena set without FamVMmode
1650 bne++ eEnoFAM ; Can this context be FAM intercept
1651 lwz r4,FAMintercept(r2) ; Load exceptions mask to intercept
1652 li r3,0 ; Clear
1653 srwi r1,r11,2 ; divide r11 by 4
1654 oris r3,r3,0x8000 ; Set r3 to 0x80000000
1655 srw r1,r3,r1 ; Set bit for current exception
1656 and. r1,r1,r4 ; And current exception with the intercept mask
1657 beq++ eEnoFAM ; Is it FAM intercept
1658 b EXT(vmm_fam_exc)
1659
1660 .align 5
1661
;
; eEnoFAM: 64-bit common register save. Saves GPRs, SRR0/1, LR, CTR, XER, CR,
; DAR, DSISR and the exception code into the savearea (r13), loads the CPU
; feature flags into CR fields, timestamps the interrupt, and patches the
; return address if we were napping so we resume at machine_idle_ret.
; On entry: r2 = per_proc, r11 = exception code, r13 = savearea (physical),
; r6 points into the savearea line holding SRR0/SRR1/CR/XER/LR.
; Interleaving of stores with SPR reads is deliberate (latency hiding) —
; do not reorder.
;
1662 eEnoFAM: lwz r1,pfAvailable(r2) ; Get the CPU features flags
1663 dcbz128 0,r6 ; allocate 128-byte line with SRR0, SRR1, CR, XER, and LR
1664
1665 ;
1666 ; Remember, we are setting up CR6 with feature flags
1667 ;
1668 std r7,saver7(r13) ; Save this one
1669 mtcrf 0x80,r1 ; Put the features flags (that we care about) in the CR
1670 std r8,saver8(r13) ; Save this one
1671 mtcrf 0x40,r1 ; Put the features flags (that we care about) in the CR
1672 mfsrr0 r6 ; Get the interruption SRR0
1673 mtcrf 0x20,r1 ; Put the features flags (that we care about) in the CR
1674 mfsrr1 r7 ; Get the interrupt SRR1
1675 std r6,savesrr0(r13) ; Save the SRR0
1676 mtcrf 0x02,r1 ; Put the features flags (that we care about) in the CR
1677 std r9,saver9(r13) ; Save this one
1678 crmove featAltivec,pfAltivecb ; Set the Altivec flag
1679 std r7,savesrr1(r13) ; Save SRR1
1680 mfsprg r9,3 ; Get rupt time R11 (stashed in SPRG3 by the vector)
1681 std r10,saver10(r13) ; Save this one
1682 mfsprg r6,2 ; Get interrupt time R13 (stashed in SPRG2 by the vector)
1683 std r9,saver11(r13) ; Save rupt time R11
1684 mtsprg 2,r1 ; Set the feature flags
1685 std r12,saver12(r13) ; Save this one
1686 mflr r4 ; Get the LR
1687 mftb r7 ; Get the timebase
1688 std r6,saver13(r13) ; Save rupt R13
1689 std r7,ruptStamp(r2) ; Save the time stamp
1690 std r7,SAVtime(r13) ; Save the time stamp
1691
1692 bf++ wasNapping,notNappingSF ; Skip if not waking up from nap...
1693
1694 ld r6,napStamp(r2) ; Pick up nap stamp
1695 lis r3,hi16(EXT(machine_idle_ret)) ; Get high part of nap/doze return
1696 sub r7,r7,r6 ; Subtract stamp from now
1697 ld r6,napTotal(r2) ; Pick up total
1698 add r6,r6,r7 ; Add low to total
1699 ori r3,r3,lo16(EXT(machine_idle_ret)) ; Get low part of nap/doze return
1700 std r6,napTotal(r2) ; Save the high total
1701 std r3,savesrr0(r13) ; Modify to return to nap/doze exit
1702
1703 notNappingSF:
1704 std r14,saver14(r13) ; Save this one
1705 std r15,saver15(r13) ; Save this one
1706 stw r0,savecr(r13) ; Save rupt CR
1707 mfctr r6 ; Get the CTR
1708 std r16,saver16(r13) ; Save this one
1709 std r4,savelr(r13) ; Save rupt LR
1710
1711 std r17,saver17(r13) ; Save this one
1712 li r7,savepmc ; Point to pmc area
1713 std r18,saver18(r13) ; Save this one
1714 lwz r17,spcFlags(r2) ; Get the special flags from per_proc
1715 std r6,savectr(r13) ; Save rupt CTR
1716 std r19,saver19(r13) ; Save this one
1717 mfdar r6 ; Get the rupt DAR
1718 std r20,saver20(r13) ; Save this one
1719
1720 dcbz128 r7,r13 ; Clear out the pmc spot
1721
1722 std r21,saver21(r13) ; Save this one
1723 std r5,savexer(r13) ; Save the rupt XER
1724 std r22,saver22(r13) ; Save this one
1725 std r23,saver23(r13) ; Save this one
1726 std r24,saver24(r13) ; Save this one
1727 std r25,saver25(r13) ; Save this one
1728 mfdsisr r7 ; Get the rupt DSISR
1729 std r26,saver26(r13) ; Save this one
1730 andis. r17,r17,hi16(perfMonitor) ; Is the performance monitor enabled?
1731 std r27,saver27(r13) ; Save this one
1732 li r10,emfp0 ; Point to floating point save
1733 std r28,saver28(r13) ; Save this one
1734 la r27,savevscr(r13) ; point to 32-byte line with VSCR and FPSCR
1735 std r29,saver29(r13) ; Save R29
1736 std r30,saver30(r13) ; Save this one
1737 std r31,saver31(r13) ; Save this one
1738 std r6,savedar(r13) ; Save the rupt DAR
1739 stw r7,savedsisr(r13) ; Save the rupt code DSISR
1740 stw r11,saveexception(r13) ; Save the exception code
1741
;
; If the performance monitor is enabled (cr0 from the andis. on perfMonitor
; above), save all eight 970 PMCs plus MMCR0/MMCR1/MMCRA into the savearea
; and clear them so the monitor is quiesced for this interrupt level.
;
1742 beq++ noPerfMonSave64 ; Performance monitor not on...
1743
1744 li r22,0 ; r22: zero
1745
1746 mfspr r23,mmcr0_gp
1747 mfspr r24,mmcr1_gp
1748 mfspr r25,mmcra_gp
1749 std r23,savemmcr0(r13) ; Save MMCR0
1750 std r24,savemmcr1(r13) ; Save MMCR1
1751 std r25,savemmcr2(r13) ; Save MMCRA (note: stored in the mmcr2 slot)
1752 mtspr mmcr0_gp,r22 ; Leave MMCR0 clear
1753 mtspr mmcr1_gp,r22 ; Leave MMCR1 clear
1754 mtspr mmcra_gp,r22 ; Leave MMCRA clear
1755 mfspr r23,pmc1_gp
1756 mfspr r24,pmc2_gp
1757 mfspr r25,pmc3_gp
1758 mfspr r26,pmc4_gp
1759 stw r23,savepmc+0(r13) ; Save PMC1
1760 stw r24,savepmc+4(r13) ; Save PMC2
1761 stw r25,savepmc+8(r13) ; Save PMC3
1762 stw r26,savepmc+12(r13) ; Save PMC4
1763 mfspr r23,pmc5_gp
1764 mfspr r24,pmc6_gp
1765 mfspr r25,pmc7_gp
1766 mfspr r26,pmc8_gp
1767 stw r23,savepmc+16(r13) ; Save PMC5
1768 stw r24,savepmc+20(r13) ; Save PMC6
1769 stw r25,savepmc+24(r13) ; Save PMC7
1770 stw r26,savepmc+28(r13) ; Save PMC8
1771 mtspr pmc1_gp,r22 ; Leave PMC1 clear
1772 mtspr pmc2_gp,r22 ; Leave PMC2 clear
1773 mtspr pmc3_gp,r22 ; Leave PMC3 clear
1774 mtspr pmc4_gp,r22 ; Leave PMC4 clear
1775 mtspr pmc5_gp,r22 ; Leave PMC5 clear
1776 mtspr pmc6_gp,r22 ; Leave PMC6 clear
1777 mtspr pmc7_gp,r22 ; Leave PMC7 clear
1778 mtspr pmc8_gp,r22 ; Leave PMC8 clear
1779
1780 noPerfMonSave64:
1781
1782 ;
1783 ; Everything is saved at this point, except for FPRs, and VMX registers.
1784 ; Time for us to get a new savearea and then trace interrupt if it is enabled.
1785 ;
1786
;
; Decide whether to cut a trace entry for this interrupt: the traceMask word
; holds per-exception enable bits (shifted into CR5 via rlwnm) in its high
; half and a per-cpu enable mask in its low half. CR5_EQ clear = trace.
;
1787 lwz r25,traceMask(0) ; Get the trace mask
1788 li r0,SAVgeneral ; Get the savearea type value
1789 lhz r19,PP_CPU_NUMBER(r2) ; Get the logical processor number
1790 stb r0,SAVflags+2(r13) ; Mark valid context
1791 rlwinm r22,r11,30,0,31 ; Divide interrupt code by 2 (rotate right 2)
1792 li r23,trcWork ; Get the trace work area address
1793 addi r22,r22,10 ; Adjust code so we shift into CR5
1794 li r26,0x8 ; Get start of cpu mask
1795 rlwnm r7,r25,r22,22,22 ; Set CR5_EQ bit position to 0 if tracing allowed
1796 srw r26,r26,r19 ; Get bit position of cpu number
1797 mtcrf 0x04,r7 ; Set CR5 to show trace or not
1798 and. r26,r26,r25 ; See if we trace this cpu
1799 crandc cr5_eq,cr5_eq,cr0_eq ; Turn off tracing if cpu is disabled
1800
1801 bne++ cr5,xcp64xit ; Skip all of this if no tracing here...
1802
1803 ;
1804 ; We select a trace entry using a compare and swap on the next entry field.
1805 ; Since we do not lock the actual trace buffer, there is a potential that
1806 ; another processor could wrap and trash our entry. Who cares?
1807 ;
1808
;
; Lock-free allocation of the next trace-table slot: lwarx/stwcx. on the
; trcWork "next entry" pointer, wrapping from traceEnd back to traceStart.
; r20 = our slot on exit; collisions simply retry.
;
1809 lwz r25,traceStart(0) ; Get the start of trace table
1810 lwz r26,traceEnd(0) ; Get end of trace table
1811
1812 trcselSF: lwarx r20,0,r23 ; Get and reserve the next slot to allocate
1813
1814 addi r22,r20,LTR_size ; Point to the next trace entry
1815 cmplw r22,r26 ; Do we need to wrap the trace table?
1816 bne++ gotTrcEntSF ; No wrap, we got us a trace entry...
1817
1818 mr r22,r25 ; Wrap back to start
1819
1820 gotTrcEntSF:
1821 stwcx. r22,0,r23 ; Try to update the current pointer
1822 bne- trcselSF ; Collision, try again...
1823
1824 #if ESPDEBUG
1825 dcbf 0,r23 ; Force to memory
1826 sync
1827 #endif
1828
1829 ;
1830 ; Let us cut that trace entry now.
1831 ;
1832 ; Note that this code cuts a trace table entry for everything but the CutTrace call.
1833 ; An identical entry is made during normal CutTrace processing. Any entry
1834 ; format changes made must be done in both places.
1835 ;
1836
;
; Fill in the trace entry at r20 from the savearea, then (at xcp64xit,
; also the no-trace join point) grab a fresh physical savearea for the
; next interrupt and join the common processing path.
; NOTE: SRR0/SRR1/DAR/DSISR/CTR are read live here — they are still valid
; because nothing above has taken another exception.
;
1837 dcbz128 0,r20 ; Zap the trace entry
1838
1839 lwz r9,SAVflags(r13) ; Get savearea flags
1840
1841 ld r16,ruptStamp(r2) ; Get top of time base
1842 ld r0,saver0(r13) ; Get back interrupt time R0 (we need this whether we trace or not)
1843 std r16,LTR_timeHi(r20) ; Set the upper part of TB
1844 ld r1,saver1(r13) ; Get back interrupt time R1
1845 rlwinm r9,r9,20,16,23 ; Isolate the special flags
1846 ld r18,saver2(r13) ; Get back interrupt time R2
1847 std r0,LTR_r0(r20) ; Save off register 0
1848 rlwimi r9,r19,0,24,31 ; Slide in the cpu number
1849 ld r3,saver3(r13) ; Restore this one
1850 sth r9,LTR_cpu(r20) ; Stash the cpu number and special flags
1851 std r1,LTR_r1(r20) ; Save off register 1
1852 ld r4,saver4(r13) ; Restore this one
1853 std r18,LTR_r2(r20) ; Save off register 2
1854 ld r5,saver5(r13) ; Restore this one
1855 ld r6,saver6(r13) ; Get R6
1856 std r3,LTR_r3(r20) ; Save off register 3
1857 lwz r16,savecr(r13) ; Get the CR value
1858 std r4,LTR_r4(r20) ; Save off register 4
1859 mfsrr0 r17 ; Get SRR0 back, it is still good
1860 std r5,LTR_r5(r20) ; Save off register 5
1861 std r6,LTR_r6(r20) ; Save off register 6
1862 mfsrr1 r18 ; SRR1 is still good in here
1863 stw r16,LTR_cr(r20) ; Save the CR
1864 std r17,LTR_srr0(r20) ; Save the SRR0
1865 std r18,LTR_srr1(r20) ; Save the SRR1
1866
1867 mfdar r17 ; Get this back
1868 ld r16,savelr(r13) ; Get the LR
1869 std r17,LTR_dar(r20) ; Save the DAR
1870 mfctr r17 ; Get the CTR (still good in register)
1871 std r16,LTR_lr(r20) ; Save the LR
1872 std r17,LTR_ctr(r20) ; Save off the CTR
1873 mfdsisr r17 ; Get the DSISR
1874 std r13,LTR_save(r20) ; Save the savearea
1875 stw r17,LTR_dsisr(r20) ; Save the DSISR
1876 sth r11,LTR_excpt(r20) ; Save the exception type
1877 #if 0
1878 lwz r17,FPUowner(r2) ; (TEST/DEBUG) Get the current floating point owner
1879 stw r17,LTR_rsvd0(r20) ; (TEST/DEBUG) Record the owner
1880 #endif
1881
1882 #if ESPDEBUG
1883 dcbf 0,r20 ; Force to memory
1884 sync ; Make sure it all goes
1885 #endif
1886 xcp64xit: mr r14,r11 ; Save the interrupt code across the call
1887 bl EXT(save_get_phys_64) ; Grab a savearea
1888 mfsprg r2,0 ; Get the per_proc info
1889 li r10,emfp0 ; Point to floating point save
1890 mr r11,r14 ; Get the exception code back
1891 dcbz128 r10,r2 ; Clear for speed
1892 std r3,next_savearea(r2) ; Store the savearea for the next rupt
1893 b xcpCommon ; Go join the common interrupt processing...
1894
1895 ;
1896 ; All of the context is saved. Now we will get a
1897 ; fresh savearea. After this we can take an interrupt.
1898 ;
1899
1900 .align 5
1901
1902 xcpCommon:
1903
1904 ;
1905 ; Here we will save some floating point and vector status
1906 ; and we also set a clean default status for a new interrupt level.
1907 ; Note that we assume that emfp0 is on an altivec boundary
1908 ; and that R10 points to it (as a displacement from R2).
1909 ;
1910 ; We need to save the FPSCR as if it is normal context.
1911 ; This is because pending exceptions will cause an exception even if
1912 ; FP is disabled. We need to clear the FPSCR when we first start running in the
1913 ; kernel.
1914 ;
1915
1916 stfd f0,emfp0(r2) ; Save FPR0
1917 stfd f1,emfp1(r2) ; Save FPR1
1918 li r19,0 ; Assume no Altivec (VRSAVE value to store)
1919 mffs f0 ; Get the FPSCR
1920 lfd f1,Zero(0) ; Make a 0
1921 stfd f0,savefpscrpad(r13) ; Save the FPSCR
1922 li r9,0 ; Get set to clear VRSAVE
1923 mtfsf 0xFF,f1 ; Clear it
1924 addi r14,r10,16 ; Displacement to second vector register
1925 lfd f0,emfp0(r2) ; Restore FPR0
1926 la r28,savevscr(r13) ; Point to the status area
1927 lfd f1,emfp1(r2) ; Restore FPR1
1928
1929 bf featAltivec,noavec ; No Altivec on this CPU...
1930
1931 stvxl v0,r10,r2 ; Save a register
1932 stvxl v1,r14,r2 ; Save a second register
1933 mfspr r19,vrsave ; Get the VRSAVE register
1934 mfvscr v0 ; Get the vector status register
1935 vspltish v1,1 ; Turn on the non-Java bit and saturate
1936 stvxl v0,0,r28 ; Save the vector status
1937 vspltisw v0,1 ; Turn on the saturate bit
1938 vxor v1,v1,v0 ; Turn off saturate
1939 mtvscr v1 ; Set the non-java, no saturate status for new level
1940 mtspr vrsave,r9 ; Clear VRSAVE for each interrupt level
1941
1942 lvxl v0,r10,r2 ; Restore first work register
1943 lvxl v1,r14,r2 ; Restore second work register
1944
1945 noavec: stw r19,savevrsave(r13) ; Save the vector register usage flags
1946
1947 ;
1948 ; We are now done saving all of the context. Start filtering the interrupts.
1949 ; Note that a Redrive will count as an actual interrupt.
1950 ; Note also that we take a lot of system calls so we will start decode here.
1951 ;
1952
;
; Redrive: interrupt filter/dispatcher. Indexes xcpTable by exception code
; (r11), bumps the per-exception and redrive counters, checks the SAVeat
; flag (eat = ignore) and the fast-path syscall pattern in R0, then branches
; through CTR to the handler. Re-entered when a handler converts one
; exception into another (a "redrive").
;
1953 Redrive:
1954 lwz r22,SAVflags(r13) ; Pick up the flags
1955 lwz r0,saver0+4(r13) ; Get back interrupt time syscall number
1956 mfsprg r2,0 ; Restore per_proc
1957
1958 lwz r20,lo16(xcpTable)(r11) ; Get the interrupt handler (note: xcpTable must be in 1st 32k of physical memory)
1959 la r12,hwCounts(r2) ; Point to the exception count area
1960 andis. r24,r22,hi16(SAVeat) ; Should we eat this one?
1961 rlwinm r22,r22,SAVredriveb+1,31,31 ; Get a 1 if we are redriving
1962 add r12,r12,r11 ; Point to the count
1963 lwz r25,0(r12) ; Get the old value
1964 lwz r23,hwRedrives(r2) ; Get the redrive count
1965 crmove cr3_eq,cr0_eq ; Remember if we are ignoring
1966 xori r24,r22,1 ; Get the NOT of the redrive
1967 mtctr r20 ; Point to the interrupt handler
1968 mtcrf 0x80,r0 ; Set our CR0 to the high nybble of possible syscall code
1969 add r25,r25,r24 ; Count this one if not a redrive
1970 add r23,r23,r22 ; Count this one if it is a redrive
1971 crandc cr0_lt,cr0_lt,cr0_gt ; See if we have R0 equal to 0b10xx...x
1972 stw r25,0(r12) ; Store it back
1973 stw r23,hwRedrives(r2) ; Save the redrive count
1974 bne-- cr3,IgnoreRupt ; Interruption is being ignored...
1975 bctr ; Go process the exception...
1976
1977
1978 ;
1979 ; Exception vector filter table (like everything in this file, must be in 1st 32KB of physical memory)
1980 ;
1981
;
; xcpTable: one 32-bit handler address per exception code (codes are 4 apart,
; so the code itself is the byte index). Indexed by Redrive above; must stay
; in the first 32KB of physical memory and in T_* enumeration order.
;
1982 .align 7
1983
1984 xcpTable:
1985 .long EatRupt ; T_IN_VAIN
1986 .long PassUpTrap ; T_RESET
1987 .long MachineCheck ; T_MACHINE_CHECK
1988 .long EXT(handlePF) ; T_DATA_ACCESS
1989 .long EXT(handlePF) ; T_INSTRUCTION_ACCESS
1990 .long PassUpRupt ; T_INTERRUPT
1991 .long EXT(AlignAssist) ; T_ALIGNMENT
1992 .long ProgramChk ; T_PROGRAM
1993 .long PassUpFPU ; T_FP_UNAVAILABLE
1994 .long PassUpRupt ; T_DECREMENTER
1995 .long PassUpTrap ; T_IO_ERROR
1996 .long PassUpTrap ; T_RESERVED
1997 .long xcpSyscall ; T_SYSTEM_CALL
1998 .long PassUpTrap ; T_TRACE
1999 .long PassUpTrap ; T_FP_ASSIST
2000 .long PassUpTrap ; T_PERF_MON
2001 .long PassUpVMX ; T_VMX
2002 .long PassUpTrap ; T_INVALID_EXCP0
2003 .long PassUpTrap ; T_INVALID_EXCP1
2004 .long PassUpTrap ; T_INVALID_EXCP2
2005 .long PassUpTrap ; T_INSTRUCTION_BKPT
2006 .long PassUpRupt ; T_SYSTEM_MANAGEMENT
2007 .long EXT(AltivecAssist) ; T_ALTIVEC_ASSIST
2008 .long PassUpRupt ; T_THERMAL
2009 .long PassUpTrap ; T_INVALID_EXCP5
2010 .long PassUpTrap ; T_INVALID_EXCP6
2011 .long PassUpTrap ; T_INVALID_EXCP7
2012 .long PassUpTrap ; T_INVALID_EXCP8
2013 .long PassUpTrap ; T_INVALID_EXCP9
2014 .long PassUpTrap ; T_INVALID_EXCP10
2015 .long PassUpTrap ; T_INVALID_EXCP11
2016 .long PassUpTrap ; T_INVALID_EXCP12
2017 .long PassUpTrap ; T_INVALID_EXCP13
2018
2019 .long PassUpTrap ; T_RUNMODE_TRACE
2020
2021 .long PassUpRupt ; T_SIGP
2022 .long PassUpTrap ; T_PREEMPT
2023 .long conswtch ; T_CSWITCH
2024 .long PassUpRupt ; T_SHUTDOWN
2025 .long PassUpAbend ; T_CHOKE
2026
2027 .long EXT(handleDSeg) ; T_DATA_SEGMENT
2028 .long EXT(handleISeg) ; T_INSTRUCTION_SEGMENT
2029
2030 .long WhoaBaby ; T_SOFT_PATCH
2031 .long WhoaBaby ; T_MAINTENANCE
2032 .long WhoaBaby ; T_INSTRUMENTATION
2033 .long WhoaBaby ; T_ARCHDEP0
2034 .long EatRupt ; T_HDEC
2035 ;
2036 ; Just what the heck happened here????
2037 ; NB: also get here from UFT dispatch table, on bogus index
2038 ;
2039
2040 WhoaBaby: b . ; Open the hood and wait for help (spin forever)
2041
2042 .align 5
2043
;
; IgnoreRupt: SAVeat was set for this savearea — count and discard it.
;
2044 IgnoreRupt:
2045 lwz r20,hwIgnored(r2) ; Grab the ignored interruption count
2046 addi r20,r20,1 ; Count this one
2047 stw r20,hwIgnored(r2) ; Save the ignored count
2048 b EatRupt ; Ignore it...
2049
2050
2051
2052 ;
2053 ; System call
2054 ;
2055
2056 .align 5
2057
;
; xcpSyscall: system call triage. Normal syscalls pass up to shandler;
; R0 of the form 0b10xx...x (CR0 set from its high nybble in Redrive) marks
; a firmware call, allowed from supervisor state always and from user state
; only when dgFlags.enaUsrFCall permits. R0 encoding 0x80000000|1 is the
; CutTrace request, which is complete by now and is simply eaten.
;
2058 xcpSyscall: lis r20,hi16(EXT(shandler)) ; Assume this is a normal one, get handler address
2059 rlwinm r6,r0,1,0,31 ; Move sign bit to the end
2060 ori r20,r20,lo16(EXT(shandler)) ; Assume this is a normal one, get handler address
2061 bnl++ cr0,PassUp ; R0 not 0b10xxx...x, can not be any kind of magical system call, just pass it up...
2062 lwz r7,savesrr1+4(r13) ; Get the entering MSR (low half)
2063 lwz r1,dgFlags(0) ; Get the flags
2064 cmplwi cr2,r6,1 ; See if original R0 had the CutTrace request code in it
2065
2066 rlwinm. r7,r7,0,MSR_PR_BIT,MSR_PR_BIT ; Did we come from user state?
2067 beq++ FCisok ; From supervisor state...
2068
2069 rlwinm. r1,r1,0,enaUsrFCallb,enaUsrFCallb ; Are they valid?
2070 beq++ PassUp ; No, treat as a normal one...
2071
2072 FCisok: beq++ cr2,EatRupt ; This is a CutTrace system call, we are done with it...
2073
2074 ;
2075 ; Here is where we call the firmware. If it returns T_IN_VAIN, that means
2076 ; that it has handled the interruption. Remember: thou shalt not trash R13
2077 ; while you are away. Anything else is ok.
2078 ;
2079
2080 lwz r3,saver3+4(r13) ; Restore the first parameter
2081 b EXT(FirmwareCall) ; Go handle the firmware call....
2082
2083 ;
2084 ; Here is where we return from the firmware call
2085 ;
2086
2087 .align 5
2088 .globl EXT(FCReturn)
2089
;
; FCReturn: firmware-call return point. T_IN_VAIN means fully handled;
; anything else is a (possibly new) exception code that gets re-filtered.
;
2090 LEXT(FCReturn)
2091 cmplwi r3,T_IN_VAIN ; Was it handled?
2092 beq++ EatRupt ; Interrupt was handled...
2093 mr r11,r3 ; Put the rupt code into the right register
2094 b Redrive ; Go through the filter again...
2095
2096
2097 ;
2098 ; Here is where we return from the PTE miss and segment exception handler
2099 ;
2100
2101 .align 5
2102 .globl EXT(PFSExit)
2103
2104 LEXT(PFSExit)
;
; PFSExit: return point from the PTE-miss and segment exception handlers.
; Everything from the first "#if 0" through "opopop:" below is disabled
; BRINGUP debug scaffolding (DABR-hit instruction emulation and tracing);
; it is kept for reference and compiled out. Live code resumes at the
; "lwz r0,savesrr1+4(r13)" in the following block.
;
2105
2106 #if 0
2107 mfsprg r2,0 ; (BRINGUP)
2108 lwz r0,savedsisr(r13) ; (BRINGUP)
2109 andis. r0,r0,hi16(dsiAC) ; (BRINGUP)
2110 beq++ didnthit ; (BRINGUP)
2111 lwz r0,20(0) ; (BRINGUP)
2112 mr. r0,r0 ; (BRINGUP)
2113 bne-- didnthit ; (BRINGUP)
2114 #if 0
2115 li r0,1 ; (BRINGUP)
2116 stw r0,20(0) ; (BRINGUP)
2117 lis r0,hi16(Choke) ; (BRINGUP)
2118 ori r0,r0,lo16(Choke) ; (BRINGUP)
2119 sc ; (BRINGUP)
2120 #endif
2121
2122 lwz r4,savesrr0+4(r13) ; (BRINGUP)
2123 lwz r8,savesrr1+4(r13) ; (BRINGUP)
2124 lwz r6,savedar+4(r13) ; (BRINGUP)
2125 rlwinm. r0,r8,0,MSR_IR_BIT,MSR_IR_BIT ; (BRINGUP)
2126 mfmsr r9 ; (BRINGUP)
2127 ori r0,r9,lo16(MASK(MSR_DR)) ; (BRINGUP)
2128 beq-- hghg ; (BRINGUP)
2129 mtmsr r0 ; (BRINGUP)
2130 isync ; (BRINGUP)
2131
2132 hghg: lwz r5,0(r4) ; (BRINGUP)
2133 beq-- hghg1 ; (BRINGUP)
2134 mtmsr r9 ; (BRINGUP)
2135 isync ; (BRINGUP)
2136
2137 hghg1: rlwinm r7,r5,6,26,31 ; (BRINGUP)
2138 rlwinm r27,r5,14,24,28 ; (BRINGUP)
2139 addi r3,r13,saver0+4 ; (BRINGUP)
2140 lwzx r3,r3,r27 ; (BRINGUP)
2141
2142 #if 0
2143 lwz r27,patcharea+4(r2) ; (BRINGUP)
2144 mr. r3,r3 ; (BRINGUP)
2145 bne++ nbnbnb ; (BRINGUP)
2146 addi r27,r27,1 ; (BRINGUP)
2147 stw r27,patcharea+4(r2) ; (BRINGUP)
2148 nbnbnb:
2149 #endif
2150
2151 rlwinm. r28,r8,0,MSR_DR_BIT,MSR_DR_BIT ; (BRINGUP)
2152 rlwinm r27,r6,0,0,29 ; (BRINGUP)
2153 ori r28,r9,lo16(MASK(MSR_DR)) ; (BRINGUP)
2154 mfspr r10,dabr ; (BRINGUP)
2155 li r0,0 ; (BRINGUP)
2156 mtspr dabr,r0 ; (BRINGUP)
2157 cmplwi cr1,r7,31 ; (BRINGUP)
2158 beq-- qqq0 ; (BRINGUP)
2159 mtmsr r28 ; (BRINGUP)
2160 qqq0:
2161 isync ; (BRINGUP)
2162
2163 lwz r27,0(r27) ; (BRINGUP) - Get original value
2164
2165 bne cr1,qqq1 ; (BRINGUP)
2166
2167 rlwinm r5,r5,31,22,31 ; (BRINGUP)
2168 cmplwi cr1,r5,151 ; (BRINGUP)
2169 beq cr1,qqq3 ; (BRINGUP)
2170 cmplwi cr1,r5,407 ; (BRINGUP)
2171 beq cr1,qqq2 ; (BRINGUP)
2172 cmplwi cr1,r5,215 ; (BRINGUP)
2173 beq cr1,qqq0q ; (BRINGUP)
2174 cmplwi cr1,r5,1014 ; (BRINGUP)
2175 beq cr1,qqqm1 ; (BRINGUP)
2176
2177 lis r0,hi16(Choke) ; (BRINGUP)
2178 ori r0,r0,lo16(Choke) ; (BRINGUP)
2179 sc ; (BRINGUP)
2180
2181 qqqm1: rlwinm r7,r6,0,0,26 ; (BRINGUP)
2182 stw r0,0(r7) ; (BRINGUP)
2183 stw r0,4(r7) ; (BRINGUP)
2184 stw r0,8(r7) ; (BRINGUP)
2185 stw r0,12(r7) ; (BRINGUP)
2186 stw r0,16(r7) ; (BRINGUP)
2187 stw r0,20(r7) ; (BRINGUP)
2188 stw r0,24(r7) ; (BRINGUP)
2189 stw r0,28(r7) ; (BRINGUP)
2190 b qqq9
2191
2192 qqq1: cmplwi r7,38 ; (BRINGUP)
2193 bgt qqq2 ; (BRINGUP)
2194 blt qqq3 ; (BRINGUP)
2195
2196 qqq0q: stb r3,0(r6) ; (BRINGUP)
2197 b qqq9 ; (BRINGUP)
2198
2199 qqq2: sth r3,0(r6) ; (BRINGUP)
2200 b qqq9 ; (BRINGUP)
2201
2202 qqq3: stw r3,0(r6) ; (BRINGUP)
2203
2204 qqq9:
2205 #if 0
2206 rlwinm r7,r6,0,0,29 ; (BRINGUP)
2207 lwz r0,0(r7) ; (BRINGUP) - Get newest value
2208 #else
2209 lis r7,hi16(0x000792B8) ; (BRINGUP)
2210 ori r7,r7,lo16(0x000792B8) ; (BRINGUP)
2211 lwz r0,0(r7) ; (BRINGUP) - Get newest value
2212 #endif
2213 mtmsr r9 ; (BRINGUP)
2214 mtspr dabr,r10 ; (BRINGUP)
2215 isync ; (BRINGUP)
2216
2217 #if 0
2218 lwz r28,patcharea+12(r2) ; (BRINGUP)
2219 mr. r28,r28 ; (BRINGUP)
2220 bne++ qqq12 ; (BRINGUP)
2221 lis r28,0x4000 ; (BRINGUP)
2222
2223 qqq12: stw r27,0(r28) ; (BRINGUP)
2224 lwz r6,savedar+4(r13) ; (BRINGUP)
2225 stw r0,4(r28) ; (BRINGUP)
2226 stw r4,8(r28) ; (BRINGUP)
2227 stw r6,12(r28) ; (BRINGUP)
2228 addi r28,r28,16 ; (BRINGUP)
2229 mr. r3,r3 ; (BRINGUP)
2230 stw r28,patcharea+12(r2) ; (BRINGUP)
2231 lwz r10,patcharea+8(r2) ; (BRINGUP)
2232 lwz r0,patcharea+4(r2) ; (BRINGUP)
2233 #endif
2234
2235 #if 1
2236 stw r0,patcharea(r2) ; (BRINGUP)
2237 #endif
2238
2239 #if 0
2240 xor r28,r0,r27 ; (BRINGUP) - See how much it changed
2241 rlwinm r28,r28,24,24,31 ; (BRINGUP)
2242 cmplwi r28,1 ; (BRINGUP)
2243
2244 ble++ qqq10 ; (BRINGUP)
2245
2246 mr r7,r0 ; (BRINGUP)
2247 li r0,1 ; (BRINGUP)
2248 stw r0,20(0) ; (BRINGUP)
2249 lis r0,hi16(Choke) ; (BRINGUP)
2250 ori r0,r0,lo16(Choke) ; (BRINGUP)
2251 sc ; (BRINGUP)
2252 #endif
2253
2254
2255 qqq10: addi r4,r4,4 ; (BRINGUP)
2256 stw r4,savesrr0+4(r13) ; (BRINGUP)
2257
2258 li r11,T_IN_VAIN ; (BRINGUP)
2259 b EatRupt ; (BRINGUP)
2260
2261 didnthit: ; (BRINGUP)
2262 #endif
2263 #if 0
2264 lwz r0,20(0) ; (BRINGUP)
2265 mr. r0,r0 ; (BRINGUP)
2266 beq++ opopop ; (BRINGUP)
2267 li r0,0 ; (BRINGUP)
2268 stw r0,20(0) ; (BRINGUP)
2269 lis r0,hi16(Choke) ; (BRINGUP)
2270 ori r0,r0,lo16(Choke) ; (BRINGUP)
2271 sc ; (BRINGUP)
2272 opopop:
2273 #endif
;
; Live PFSExit logic: if the fault was handled (T_IN_VAIN) eat it; otherwise,
; for a user-state fault in a FamVMena (non-FamVMmode) context whose
; exception bit is in FAMintercept, hand the page fault to the VMM; all
; other cases fall into NoFamPf below.
;
2274 lwz r0,savesrr1+4(r13) ; Get the MSR in use at exception time
2275 cmplwi cr1,r11,T_IN_VAIN ; Was it handled?
2276 rlwinm. r4,r0,0,MSR_PR_BIT,MSR_PR_BIT ; Are we trapping from supervisor state?
2277 beq++ cr1,EatRupt ; Yeah, just blast back to the user...
2278 beq-- NoFamPf ; Supervisor state, cannot be a FAM guest fault...
2279 mfsprg r2,0 ; Get back per_proc
2280 lwz r1,spcFlags(r2) ; Load spcFlags
2281 rlwinm r1,r1,1+FamVMmodebit,30,31 ; Extract FamVMenabit and FamVMmodebit
2282 cmpi cr0,r1,2 ; Check FamVMena set without FamVMmode
2283 bne-- cr0,NoFamPf
2284 lwz r6,FAMintercept(r2) ; Load exceptions mask to intercept
2285 li r5,0 ; Clear
2286 srwi r1,r11,2 ; divide r11 by 4 (exception codes are 4 apart)
2287 oris r5,r5,0x8000 ; Set r5 to 0x80000000
2288 srw r1,r5,r1 ; Set bit for current exception
2289 and. r1,r1,r6 ; And current exception with the intercept mask
2290 beq++ NoFamPf ; Not in the mask, not a FAM intercept...
2291 bl EXT(vmm_fam_pf) ; Let the VMM handle the guest page fault
2292 b EatRupt
2293
;
; NoFamPf: unhandled fault. If MSR[RI] (recoverable interrupt) was set, an
; emulation routine asked to trap its own fetch faults: clear CR0_EQ in the
; saved CR, advance the PC past the faulting instruction (64-bit carry
; propagated), and hand back DAR/DSISR in R0/R1. Otherwise pass up normally.
;
2294 NoFamPf: andi. r4,r0,lo16(MASK(MSR_RI)) ; See if the recover bit is on
2295 lis r0,0x8000 ; Get 0xFFFFFFFF80000000
2296 add r0,r0,r0 ; Get 0xFFFFFFFF00000000
2297 beq++ PassUpTrap ; Not on, normal case...
2298 ;
2299 ; Here is where we handle the "recovery mode" stuff.
2300 ; This is set by an emulation routine to trap any faults when it is fetching data or
2301 ; instructions.
2302 ;
2303 ; If we get a fault, we turn off RI, set CR0_EQ to false, bump the PC, and set R0
2304 ; and R1 to the DAR and DSISR, respectively.
2305 ;
2306 lwz r3,savesrr0(r13) ; Get the failing instruction address
2307 lwz r4,savesrr0+4(r13) ; Get the failing instruction address
2308 lwz r5,savecr(r13) ; Get the condition register
2309 or r4,r4,r0 ; Fill the high part with foxes
2310 lwz r0,savedar(r13) ; Get the DAR
2311 addic r4,r4,4 ; Skip failing instruction
2312 lwz r6,savedar+4(r13) ; Get the DAR
2313 addze r3,r3 ; Propagate carry
2314 rlwinm r5,r5,0,3,1 ; Clear CR0_EQ to let emulation code know we failed
2315 lwz r7,savedsisr(r13) ; Grab the DSISR
2316 stw r3,savesrr0(r13) ; Save resume address
2317 stw r4,savesrr0+4(r13) ; Save resume address
2318 stw r5,savecr(r13) ; And the resume CR
2319 stw r0,saver0(r13) ; Pass back the DAR
2320 stw r6,saver0+4(r13) ; Pass back the DAR
2321 stw r7,saver1+4(r13) ; Pass back the DSISR
2322 b EatRupt ; Resume emulated code
2323
2324 ;
2325 ; Here is where we handle the context switch firmware call. The old
2326 ; context has been saved. The new savearea is in kind of hokey, the high order
2327 ; half is stored in saver7 and the low half is in saver3. We will just
2328 ; muck around with the savearea pointers, and then join the exit routine
2329 ;
2330
2331 .align 5
2332
;
; conswtch: context-switch firmware call. New savearea is passed split:
; low half in saver3, high half in saver7 (64-bit only). Switch r13 to it,
; convert the old (physical) savearea to its virtual address via SACvrswap
; and pass that back in the new savearea's saver3.
;
2333 conswtch:
2334 li r0,0xFFF ; Get page boundary
2335 mr r29,r13 ; Save the save
2336 andc r30,r13,r0 ; Round down to page boundary (64-bit safe)
2337 lwz r5,saver3+4(r13) ; Switch to the new savearea
2338 bf-- pf64Bitb,xcswNo64 ; Not 64-bit...
2339 lwz r6,saver7+4(r13) ; Get the high order half
2340 sldi r6,r6,32 ; Position high half
2341 or r5,r5,r6 ; Merge them
2342
2343 xcswNo64: lwz r30,SACvrswap+4(r30) ; get real to virtual translation
2344 mr r13,r5 ; Switch saveareas
2345 li r0,0 ; Clear this
2346 xor r27,r29,r30 ; Flip to virtual
2347 stw r0,saver3(r5) ; Push the new virtual savearea to the switch to routine
2348 stw r27,saver3+4(r5) ; Push the new virtual savearea to the switch to routine
2349 b EatRupt ; Start it up...
2350
2351 ;
2352 ; Handle machine check here.
2353 ;
2354 ; ?
2355 ;
2356
2357 .align 5
2358
;
; MachineCheck (32-bit path): the only recoverable case is a fault inside
; ml_probe_read. If the failing PC is within [ml_probe_read, ml_probe_read_mck)
; restore the DBATs from the shadow copy (the probe altered them) and return
; directly to the probe's caller with a failure code; anything else passes up.
;
2359 MachineCheck:
2360
2361 bt++ pf64Bitb,mck64 ; 64-bit processor? Take the 970 path...
2362
2363 lwz r27,savesrr1+4(r13) ; Pick up srr1
2364
2365 ;
2366 ; Check if the failure was in
2367 ; ml_probe_read. If so, this is expected, so modify the PC to
2368 ; ml_probe_read_mck and then eat the exception.
2369 ;
2370 lwz r30,savesrr0+4(r13) ; Get the failing PC
2371 lis r28,hi16(EXT(ml_probe_read_mck)) ; High order part
2372 lis r27,hi16(EXT(ml_probe_read)) ; High order part
2373 ori r28,r28,lo16(EXT(ml_probe_read_mck)) ; Get the low part
2374 ori r27,r27,lo16(EXT(ml_probe_read)) ; Get the low part
2375 cmplw r30,r28 ; Check highest possible
2376 cmplw cr1,r30,r27 ; Check lowest
2377 bge- PassUpTrap ; Outside of range
2378 blt- cr1,PassUpTrap ; Outside of range
2379 ;
2380 ; We need to fix up the BATs here because the probe
2381 ; routine messed them all up... As long as we are at it,
2382 ; fix up to return directly to caller of probe.
2383 ;
2384
2385 lis r11,hi16(EXT(shadow_BAT)+shdDBAT) ; Get shadow address
2386 ori r11,r11,lo16(EXT(shadow_BAT)+shdDBAT) ; Get shadow address
2387
2388 lwz r30,0(r11) ; Pick up DBAT 0 high
2389 lwz r28,4(r11) ; Pick up DBAT 0 low
2390 lwz r27,8(r11) ; Pick up DBAT 1 high
2391 lwz r18,16(r11) ; Pick up DBAT 2 high
2392 lwz r11,24(r11) ; Pick up DBAT 3 high
2393
2394 sync
2395 mtdbatu 0,r30 ; Restore DBAT 0 high
2396 mtdbatl 0,r28 ; Restore DBAT 0 low
2397 mtdbatu 1,r27 ; Restore DBAT 1 high
2398 mtdbatu 2,r18 ; Restore DBAT 2 high
2399 mtdbatu 3,r11 ; Restore DBAT 3 high
2400 sync
2401
2402 lwz r28,savelr+4(r13) ; Get return point
2403 lwz r27,saver0+4(r13) ; Get the saved MSR (probe stashed it in R0)
2404 li r30,0 ; Get a failure RC
2405 stw r28,savesrr0+4(r13) ; Set the return point
2406 stw r27,savesrr1+4(r13) ; Set the continued MSR
2407 stw r30,saver3+4(r13) ; Set return code
2408 b EatRupt ; Yum, yum, eat it all up...
2409
2410 ;
2411 ; 64-bit machine checks
2412 ;
2413
;
; mck64: 64-bit (970) machine check. Read and clear, via the SCOM interface,
; the async MCK source word plus the Core, L2 and Bus FIRs, apply the
; early-chip SCOM right-shift errata fix if pfSCOMFixUp is set, and stash
; all four words in the savearea in case the error is passed up.
; Each SCOM access is bracketed by sync/isync as required by the interface.
; On exit: r20=SRR1, r21=DSISR, r22=DAR, r23=SRR0, r24=MCK source,
; r25=Core FIR, r26=L2 FIR, r27=Bus FIR.
;
2414 mck64:
2415
2416 ;
2417 ; NOTE: WE NEED TO RETHINK RECOVERABILITY A BIT - radar 3167190
2418 ;
2419
2420 ld r23,savesrr0(r13) ; Grab the SRR0 in case we need bad instruction
2421 ld r20,savesrr1(r13) ; Grab the SRR1 so we can decode the thing
2422 lwz r21,savedsisr(r13) ; We might need this in a bit
2423 ld r22,savedar(r13) ; We might need this in a bit
2424
2425 lis r8,AsyMCKSrc ; Get the Async MCK Source register address
2426 mfsprg r19,2 ; Get the feature flags
2427 ori r8,r8,0x8000 ; Set to read data
2428 rlwinm. r0,r19,0,pfSCOMFixUpb,pfSCOMFixUpb ; Do we need to fix the SCOM data?
2429
2430 sync
2431
2432 mtspr scomc,r8 ; Request the MCK source
2433 mfspr r24,scomd ; Get the source
2434 mfspr r8,scomc ; Get back the status (we just ignore it)
2435 sync
2436 isync
2437
2438 lis r8,AsyMCKRSrc ; Get the Async MCK Source AND mask address
2439 li r9,0 ; Get an AND mask of 0
2440
2441 sync
2442
2443 mtspr scomd,r9 ; Set the AND mask to 0
2444 mtspr scomc,r8 ; Write the AND mask and clear conditions
2445 mfspr r8,scomc ; Get back the status (we just ignore it)
2446 sync
2447 isync
2448
2449 lis r8,cFIR ; Get the Core FIR register address
2450 ori r8,r8,0x8000 ; Set to read data
2451
2452 sync
2453
2454 mtspr scomc,r8 ; Request the Core FIR
2455 mfspr r25,scomd ; Get the source
2456 mfspr r8,scomc ; Get back the status (we just ignore it)
2457 sync
2458 isync
2459
2460 lis r8,cFIRrst ; Get the Core FIR AND mask address
2461
2462 sync
2463
2464 mtspr scomd,r9 ; Set the AND mask to 0
2465 mtspr scomc,r8 ; Write the AND mask and clear conditions
2466 mfspr r8,scomc ; Get back the status (we just ignore it)
2467 sync
2468 isync
2469
2470 lis r8,l2FIR ; Get the L2 FIR register address
2471 ori r8,r8,0x8000 ; Set to read data
2472
2473 sync
2474
2475 mtspr scomc,r8 ; Request the L2 FIR
2476 mfspr r26,scomd ; Get the source
2477 mfspr r8,scomc ; Get back the status (we just ignore it)
2478 sync
2479 isync
2480
2481 lis r8,l2FIRrst ; Get the L2 FIR AND mask address
2482
2483 sync
2484
2485 mtspr scomd,r9 ; Set the AND mask to 0
2486 mtspr scomc,r8 ; Write the AND mask and clear conditions
2487 mfspr r8,scomc ; Get back the status (we just ignore it)
2488 sync
2489 isync
2490
2491 lis r8,busFIR ; Get the Bus FIR register address
2492 ori r8,r8,0x8000 ; Set to read data
2493
2494 sync
2495
2496 mtspr scomc,r8 ; Request the Bus FIR
2497 mfspr r27,scomd ; Get the source
2498 mfspr r8,scomc ; Get back the status (we just ignore it)
2499 sync
2500 isync
2501
2502 lis r8,busFIRrst ; Get the Bus FIR AND mask address
2503
2504 sync
2505
2506 mtspr scomd,r9 ; Set the AND mask to 0
2507 mtspr scomc,r8 ; Write the AND mask and clear conditions
2508 mfspr r8,scomc ; Get back the status (we just ignore it)
2509 sync
2510 isync
2511
2512 ; Note: bug in early chips where scom reads are shifted right by 1. We fix that here.
2513 ; Also note that we will lose bit 63
2514
2515 beq++ mckNoFix ; No fix up is needed (cr0 from the pfSCOMFixUpb test above)
2516 sldi r24,r24,1 ; Shift left 1
2517 sldi r25,r25,1 ; Shift left 1
2518 sldi r26,r26,1 ; Shift left 1
2519 sldi r27,r27,1 ; Shift left 1
2520
2521 mckNoFix: std r24,savexdat0(r13) ; Save the MCK source in case we pass the error
2522 std r25,savexdat1(r13) ; Save the Core FIR in case we pass the error
2523 std r26,savexdat2(r13) ; Save the L2 FIR in case we pass the error
2524 std r27,savexdat3(r13) ; Save the BUS FIR in case we pass the error
2525
;
; Decode SRR1 to classify the machine check: uncorrectable IFetch error,
; load/store-unit error, or (via the 2-bit error-cause field) an SLB parity
; error, IFetch TLB parity error, or IFetch tablewalk-reload UE.
;
2526 rlwinm. r0,r20,0,mckIFUE-32,mckIFUE-32 ; Is this some kind of uncorrectable?
2527 bne mckUE ; Yeah...
2528
2529 rlwinm. r0,r20,0,mckLDST-32,mckLDST-32 ; Some kind of load/store error?
2530 bne mckHandleLDST ; Yes...
2531
2532 rldicl. r0,r20,46,62 ; Get the error cause code
2533 beq mckNotSure ; We need some more checks for this one...
2534
2535 cmplwi r0,2 ; Check for TLB parity error
2536 blt mckSLBparity ; This is an SLB parity error...
2537 bgt mckhIFUE ; This is an IFetch tablewalk reload UE...
2538
2539 ; IFetch TLB parity error: flush the offending TLB entry and resume.
2540
2541 isync
2542 tlbiel r23 ; Locally invalidate TLB entry for iaddr
2543 sync ; Wait for it
2544 b ceMck ; All recovered...
2545
2546 ; SLB parity error. This could be software caused. We get one if there is
2547 ; more than 1 valid SLBE with a matching ESID. That one we do not want to
2548 ; try to recover from. Search for it and if we get it, panic.
2549
;
; mckSLBparity: scan all 64 SLB entries, recording each valid ESID in the
; emvr0 scratch table. A duplicate ESID means software loaded conflicting
; SLBEs (our bug — recover the SLB but pass the error up); otherwise this
; is a pure hardware parity hit and is fully recovered. cr0_eq tracks
; which case we are in across the recovery code.
;
2550 mckSLBparity:
2551 crclr cr0_eq ; Make sure we are not equal so we take correct exit
2552
2553 la r3,emvr0(r2) ; Use this to keep track of valid ESIDs we find
2554 li r5,0 ; Start with index 0
2555
2556 mckSLBck: la r4,emvr0(r2) ; Reset scan pointer to start of saved-ESID table
2557 slbmfee r6,r5 ; Get the next SLBE
2558 andis. r0,r6,0x0800 ; See if valid bit is on
2559 beq mckSLBnx ; Skip invalid and go to next
2560
2561 mckSLBck2: cmpld r4,r3 ; Have we reached the end of the table?
2562 beq mckSLBne ; Yes, go enter this one...
2563 ld r7,0(r4) ; Pick up the saved ESID
2564 cmpld r6,r7 ; Is this a match?
2565 beq mckSLBrec ; Whoops, I did bad, recover and pass up...
2566 addi r4,r4,8 ; Next table entry
2567 b mckSLBck2 ; Check the next...
2568
2569 mckSLBnx: addi r5,r5,1 ; Point to next SLBE
2570 cmplwi r5,64 ; Have we checked all of them?
2571 bne++ mckSLBck ; Not yet, check again...
2572 b mckSLBrec ; We looked at them all, go recover...
2573
2574 mckSLBne: std r6,0(r3) ; Save this ESID
2575 addi r3,r3,8 ; Point to the new slot
2576 b mckSLBnx ; Go do the next SLBE...
2577
2578 ; Recover an SLB error
2579
2580 mckSLBrec: li r0,0 ; Set an SLB slot index of 0
2581 slbia ; Trash all SLB entries (except for entry 0 that is)
2582 slbmfee r7,r0 ; Get the entry that is in SLB index 0
2583 rldicr r7,r7,0,35 ; Clear the valid bit and the rest
2584 slbie r7 ; Invalidate it
2585
2586 li r3,0 ; Set the first SLBE
2587
2588 mckSLBclr: slbmte r0,r3 ; Clear the whole entry to 0s
2589 addi r3,r3,1 ; Bump index
2590 cmplwi cr1,r3,64 ; Have we done them all?
2591 bne++ cr1,mckSLBclr ; Not yet, clear the next one...
2592
2593 sth r3,ppInvSeg(r2) ; Store non-zero to trigger SLB reload
2594 bne++ ceMck ; This was not a programming error, all recovered...
2595 b ueMck ; Pass the software error up...
2596
2597 ;
2598 ; Handle a load/store unit error. We need to decode the DSISR
2599 ;
2600
;
; mckHandleLDST: decode the DSISR for a load/store-unit machine check.
; L1 data cache/tag parity were already handled in the vector; deferred UEs
; go to their own handlers; SLB parity shares the recovery above; what
; remains is a recoverable D-ERAT/TLB error — invalidate and count it.
;
2601 mckHandleLDST:
2602 rlwinm. r0,r21,0,mckL1DCPE,mckL1DCPE ; An L1 data cache parity error?
2603 bne++ mckL1D ; Yeah, we dealt with this back in the vector...
2604
2605 rlwinm. r0,r21,0,mckL1DTPE,mckL1DTPE ; An L1 tag error?
2606 bne++ mckL1T ; Yeah, we dealt with this back in the vector...
2607
2608 rlwinm. r0,r21,0,mckUEdfr,mckUEdfr ; Is this a "deferred" UE?
2609 bne mckDUE ; Yeah, go see if expected...
2610
2611 rlwinm. r0,r21,0,mckUETwDfr,mckUETwDfr ; Is this a "deferred" tablewalk UE?
2612 bne mckDTW ; Yeah, no recovery...
2613
2614 rlwinm. r0,r21,0,mckSLBPE,mckSLBPE ; SLB parity error?
2615 bne mckSLBparity ; Yeah, go attempt recovery....
2616
2617 ; This is a recoverable D-ERAT or TLB error
2618
2619 la r9,hwMckERCPE(r2) ; Get DERAT parity error count
2620
2621 mckInvDAR: isync
2622 tlbiel r22 ; Locally invalidate the TLB entry (r22 = DAR)
2623 sync
2624
2625 lwz r21,0(r9) ; Get count
2626 addi r21,r21,1 ; Count this one
2627 stw r21,0(r9) ; Stick it back
2628
2629 b ceMck ; All recovered...
2630
2631 ;
2632 ; When we come here, we are not quite sure what the error is. We need to
2633 ; dig a bit further.
2634 ;
2635 ; R24 is interrupt source
2636 ; R25 is Core FIR
2637 ;
2638 ; Note that both have been cleared already.
2639 ;
2640
2641 mckNotSure:
2642 rldicl. r0,r24,AsyMCKfir+1,63 ; Something in the FIR?
2643 bne-- mckFIR ; Yup, go check some more...
2644
2645 rldicl. r0,r24,AsyMCKhri+1,63 ; Hang recovery?
; NOTE(review): this targets mckHangRcvr (defined further below); the
; nearby identically-coded block labelled "mckHangRcrvr" (extra 'r') is
; not referenced from anywhere in this chunk.
2646 bne-- mckHangRcvr ; Yup...
2647
2648 rldicl. r0,r24,AsyMCKext+1,63 ; External signal?
2649 bne-- mckExtMck ; Yup...
2650
2651 ;
2652 ; We really do not know what this one is or what to do with it...
2653 ;
2654
2655 mckUnk: lwz r21,hwMckUnk(r2) ; Get unknown error count
2656 addi r21,r21,1 ; Count it
2657 stw r21,hwMckUnk(r2) ; Stuff it
2658 b ueMck ; Go south, young man...
2659
2660 ;
2661 ; Hang recovery. This is just a notification so we only count.
2662 ;
2663
; NOTE(review): this label is spelled "mckHangRcrvr" and nothing in this
; chunk branches to it -- the hang-recovery branch at mckNotSure uses the
; identically coded "mckHangRcvr" block further down.  This looks like
; dead code left behind by a label typo; confirm no external reference
; before removing.
2664 mckHangRcrvr:
2665 lwz r21,hwMckHang(r2) ; Get hang recovery count
2666 addi r21,r21,1 ; Count this one
2667 stw r21,hwMckHang(r2) ; Stick it back
2668 b ceMck ; All recovered...
2669
2670 ;
2671 ; Externally signaled MCK. No recovery for the moment, but this may be
2672 ; where we handle ml_probe_read problems eventually.
2673 ;
; NOTE(review): this path increments hwMckHang (the hang-recovery counter)
; rather than a dedicated external-machine-check counter -- looks like a
; copy/paste of mckHangRcvr; confirm intent before changing.
2674 mckExtMck:
2675 lwz r21,hwMckHang(r2) ; Get hang recovery count
2676 addi r21,r21,1 ; Count this one
2677 stw r21,hwMckHang(r2) ; Stick it back
2678 b ceMck ; All recovered...
2679
2680 ;
2681 ; Machine check cause is in a FIR. Suss it out here.
2682 ; Core FIR is in R25 and has been cleared in HW.
2683 ;
; Each test preloads r19 (or r9) with the address of the matching
; statistics counter BEFORE the conditional branch; la does not alter CR,
; so the rldicl. result survives the load.
2684
2685 mckFIR: rldicl. r0,r25,cFIRICachePE+1,63 ; I-Cache parity error?
2686 la r19,hwMckICachePE(r2) ; Point to counter
2687 bne mckInvICache ; Go invalidate I-Cache...
2688
2689 rldicl. r0,r25,cFIRITagPE0+1,63 ; I-Cache tag parity error?
2690 la r19,hwMckITagPE(r2) ; Point to counter
2691 bne mckInvICache ; Go invalidate I-Cache...
2692
2693 rldicl. r0,r25,cFIRITagPE1+1,63 ; I-Cache tag parity error?
2694 la r19,hwMckITagPE(r2) ; Point to counter
2695 bne mckInvICache ; Go invalidate I-Cache...
2696
2697 rldicl. r0,r25,cFIRIEratPE+1,63 ; IERAT parity error?
2698 la r19,hwMckIEratPE(r2) ; Point to counter
2699 bne mckInvERAT ; Go invalidate ERATs...
2700
2701 rldicl. r0,r25,cFIRIFUL2UE+1,63 ; IFetch got L2 UE?
2702 bne mckhIFUE ; Go count and pass up...
2703
2704 rldicl. r0,r25,cFIRDCachePE+1,63 ; D-Cache PE?
2705 bne mckL1D ; Handled, just go count...
2706
2707 rldicl. r0,r25,cFIRDTagPE+1,63 ; D-Cache tag PE?
2708 bne mckL1T ; Handled, just go count...
2709
2710 rldicl. r0,r25,cFIRDEratPE+1,63 ; DERAT PE?
2711 la r19,hwMckDEratPE(r2) ; Point to counter
2712 bne mckInvERAT ; Go invalidate ERATs...
2713
2714 rldicl. r0,r25,cFIRTLBPE+1,63 ; TLB PE?
2715 la r9,hwMckTLBPE(r2) ; Get TLB parity error count
2716 bne mckInvDAR ; Go recover...
2717
2718 rldicl. r0,r25,cFIRSLBPE+1,63 ; SLB PE?
2719 bne mckSLBparity ; Cope with it...
2720
2721 b mckUnk ; Have not a clue...
2722
2723 ;
2724 ; General recovery for I-Cache errors. Just flush it completely.
2725 ;
; r19 points at the statistics counter to bump (set up by mckFIR).
; The HID1 "forced ICBI match" bit makes every icbi below hit regardless
; of address/tag, so walking 64KB in 128-byte lines invalidates the whole
; I-cache even with bad tag parity.
2726
2727 .align 7 ; Force into cache line
2728
2729 mckInvICache:
2730 lis r0,0x0080 ; Get a 0x0080 (bit 9 >> 32)
2731 mfspr r21,hid1 ; Get the current HID1
2732 sldi r0,r0,32 ; Get the "forced ICBI match" bit
2733 or r0,r0,r21 ; Set forced match
2734
2735 isync
2736 mtspr hid1,r0 ; Stick it
2737 mtspr hid1,r0 ; Stick it again
2738 isync
2739
2740 li r6,0 ; Start at 0
2741
2742 mckIcbi: icbi 0,r6 ; Kill I$
2743 addi r6,r6,128 ; Next line
2744 andis. r5,r6,1 ; Have we done them all? (stops when r6 reaches 64KB)
2745 beq++ mckIcbi ; Not yet...
2746
2747 isync
2748 mtspr hid1,r21 ; Restore original HID1
2749 mtspr hid1,r21 ; Stick it again
2750 isync
2751
2752 lwz r5,0(r19) ; Get the counter
2753 addi r5,r5,1 ; Count it
2754 stw r5,0(r19) ; Stuff it back
2755 b ceMck ; All recovered...
2756
2757
2758 ; General recovery for ERAT problems - handled in exception vector already
; r19 points at the specific ERAT counter (IERAT or DERAT), set by mckFIR.
2759
2760 mckInvERAT: lwz r21,0(r19) ; Get the exception count spot
2761 addi r21,r21,1 ; Count this one
2762 stw r21,0(r19) ; Save count
2763 b ceMck ; All recovered...
2764
2765 ; General hang recovery - this is a notification only, just count.
; This is the label actually referenced by the hang-recovery test in
; mckNotSure (cf. the unreferenced "mckHangRcrvr" block above).
2766
2767 mckHangRcvr:
2768 lwz r21,hwMckHang(r2) ; Get hang recovery count
2769 addi r21,r21,1 ; Count this one
2770 stw r21,hwMckHang(r2) ; Stick it back
2771 b ceMck ; All recovered...
2772
2773
2774 ;
2775 ; These are the uncorrectable errors, just count them then pass it along.
2776 ;
2777
2778 mckUE: lwz r21,hwMckUE(r2) ; Get general uncorrectable error count
2779 addi r21,r21,1 ; Count it
2780 stw r21,hwMckUE(r2) ; Stuff it
2781 b ueMck ; Go south, young man...
2782
2783 mckhIFUE: lwz r21,hwMckIUEr(r2) ; Get I-Fetch TLB reload uncorrectable error count
2784 addi r21,r21,1 ; Count it
2785 stw r21,hwMckIUEr(r2) ; Stuff it
2786 b ueMck ; Go south, young man...
2787
2788 mckDUE: lwz r21,hwMckDUE(r2) ; Get deferred uncorrectable error count
2789 addi r21,r21,1 ; Count it
2790 stw r21,hwMckDUE(r2) ; Stuff it
2791
2792 ;
2793 ; Right here is where we end up after a failure on a ml_probe_read_64.
2794 ; We will check if that is the case, and if so, fix everything up and
2795 ; return from it.
; NOTE(review): r23 is compared against the probe routine's code range, so
; it presumably holds the machine-check SRR0 (failing PC) -- set before
; this chunk; confirm against the vector code.
2796
2797 lis r8,hi16(EXT(ml_probe_read_64)) ; High of start
2798 lis r9,hi16(EXT(ml_probe_read_mck_64)) ; High of end
2799 ori r8,r8,lo16(EXT(ml_probe_read_64)) ; Low of start
2800 ori r9,r9,lo16(EXT(ml_probe_read_mck_64)) ; Low of end
2801 cmpld r23,r8 ; Too soon?
2802 cmpld cr1,r23,r9 ; Too late?
2803
2804 cror cr0_lt,cr0_lt,cr1_gt ; Too soon or too late?
2805 ld r3,saver12(r13) ; Get the original MSR
2806 ld r5,savelr(r13) ; Get the return address
2807 li r4,0 ; Get fail code
2808 blt-- ueMck ; This is a normal machine check, just pass up...
2809 std r5,savesrr0(r13) ; Set the return address
2810
2811 std r3,savesrr1(r13) ; Set the return MSR
2812 std r4,saver3(r13) ; Set failure return code
2813 b ceMck ; All recovered...
2814
2815 mckDTW: lwz r21,hwMckDTW(r2) ; Get deferred tablewalk uncorrectable error count
2816 addi r21,r21,1 ; Count it
2817 stw r21,hwMckDTW(r2) ; Stuff it
2818 b ueMck ; Go south, young man...
2819
2820 mckL1D: lwz r21,hwMckL1DPE(r2) ; Get data cache parity error count
2821 addi r21,r21,1 ; Count it
2822 stw r21,hwMckL1DPE(r2) ; Stuff it
2823 b ceMck ; All recovered...
2824
2825 mckL1T: lwz r21,hwMckL1TPE(r2) ; Get L1 tag parity error count
2826 addi r21,r21,1 ; Count it
2827 stw r21,hwMckL1TPE(r2) ; Stuff it (falls through to ceMck)
2828
2829 ceMck: lwz r21,mckFlags(0) ; Get the flags
2830 li r0,1 ; Set the recovered flag before passing up
2831 rlwinm. r21,r21,0,31,31 ; Check if we want to log recoverables
2832 stw r0,savemisc3(r13) ; Set it
2833 beq++ EatRupt ; No log of recoverables wanted...
2834 b PassUpTrap ; Go up and log error...
2835
2836 ueMck: li r0,0 ; Set the unrecovered flag before passing up
2837 stw r0,savemisc3(r13) ; Set it
2838 b PassUpTrap ; Go up and log error and probably panic
2839
2840 ;
2841 ; We come here to handle program exceptions
2842 ;
2843 ; When the program check is a trap instruction and it happens when
2844 ; we are executing injected code, we need to check if it is an exit trap.
2845 ; If it is, we need to populate the current savearea with some of the context from
2846 ; the saved pre-inject savearea. This is needed because the current savearea will be
2847 ; tossed as part of the pass up code. Additionally, we will not be nullifying
2848 ; the emulated instruction as we do with any other exception.
2849 ;
2850
2851 .align 5
2852
2853 ProgramChk: lwz r5,savesrr1+4(r13) ; Get the interrupt SRR1
2854 lwz r3,ijsave(r2) ; Get the inject savearea top
2855 lwz r4,ijsave+4(r2) ; And get the bottom of the inject savearea pointer
2856 rlwimi r5,r5,15,31,31 ; Scoot trap flag down to a spare bit
2857 rlwinm r3,r3,0,1,0 ; Copy low 32 bits to top 32
2858 li r0,0x0023 ; Get bits that match scooted trap flag, IR, and RI
2859 and r0,r5,r0 ; Clear any extra SRR1 bits
2860 rlwimi. r3,r4,0,0,31 ; Insert low part of 64-bit address in bottom 32 bits and see if ijsave is 0
2861 cmplwi cr1,r0,1 ; Make sure we were IR off, RI off, and got a trap exception
2862 crandc cr0_eq,cr1_eq,cr0_eq ; If we are injecting, ijsave will be non-zero and we had the trap bit set
2863 mfsrr0 r4 ; Get the PC
2864 bne++ cr0,mustem ; This is not an injection exit...
2865
2866 lwz r4,0(r4) ; Get the trap instruction
2867 lis r5,hi16(ijtrap) ; Get high half of inject exit trap
2868 ori r5,r5,lo16(ijtrap) ; And the low half
2869 cmplw r4,r5 ; Correct trap instruction?
2870 bne mustem ; No, not inject exit...
2871
; It is an injection exit: copy the pre-inject SRR0/SRR1 (from the saved
; savearea at r3) into the current savearea and mark it T_INJECT_EXIT so
; the pass-up path can finish the merge.
2872 lwz r4,savesrr0(r3) ; Get the original SRR0
2873 lwz r5,savesrr0+4(r3) ; And the rest of it
2874 lwz r6,savesrr1(r3) ; Get the original SRR1
2875 stw r4,savesrr0(r13) ; Set the new SRR0 to the original
2876 lwz r4,savesrr1+4(r13) ; Get the bottom of the new SRR1
2877 lwz r7,savesrr1+4(r3) ; Get the bottom of the original SRR1
2878 li r11,T_INJECT_EXIT ; Set an inject exit exception
2879 stw r5,savesrr0+4(r13) ; Set the new bottom of SRR0 to the original
2880 rlwimi r7,r4,0,MSR_FP_BIT,MSR_FP_BIT ; Make sure we retain the current floating point enable bit
2881 stw r6,savesrr1(r13) ; Save the top half of the original SRR1
2882 sth r7,savesrr1+6(r13) ; And the last bottom
2883 stw r11,saveexception(r13) ; Set the new exception code
2884 b PassUpTrap ; Go pass it on up...
2885
2886 mustem: b EXT(Emulate) ; Go try to emulate this one...
2887
2888
2889 /*
2890 * Here's where we come back from some instruction emulator. If we come back with
2891 * T_IN_VAIN, the emulation is done and we should just reload state and directly
2892 * go back to the interrupted code. Otherwise, we'll check to see if
2893 * we need to redrive with a different interrupt, i.e., DSI.
2894 * Note that we are actually not redriving the rupt, rather changing it
2895 * into a different one. Thus we clear the redrive bit.
2896 */
2897
2898 .align 5
2899 .globl EXT(EmulExit)
2900
2901 LEXT(EmulExit)
2902
2903 cmplwi cr1,r11,T_IN_VAIN ; Was it emulated?
2904 lis r1,hi16(SAVredrive) ; Get redrive request
2905 beq++ cr1,EatRupt ; Yeah, just blast back to the user...
2906 lwz r4,SAVflags(r13) ; Pick up the flags
2907
2908 and. r0,r4,r1 ; Check if redrive requested
2909
2910 beq++ PassUpTrap ; No redrive, just keep on going...
2911
2912 b Redrive ; Redrive the exception...
2913
2914 ;
2915 ; Jump into main handler code switching on VM at the same time.
2916 ;
2917 ; We assume kernel data is mapped contiguously in physical
2918 ; memory, otherwise we would need to switch on (at least) virtual data.
2919 ; SRs are already set up.
2920 ;
; Each PassUpXxx entry loads r20 with its handler's address and falls into
; the common PassUp code, which launches the handler via rfi/rfid with
; r3 = exception code and r4 = virtual savearea address.
2921
2922 .align 5
2923
2924 PassUpTrap: lis r20,hi16(EXT(thandler)) ; Get thandler address
2925 ori r20,r20,lo16(EXT(thandler)) ; Get thandler address
2926 b PassUp ; Go pass it up...
2927
2928 PassUpRupt: lis r20,hi16(EXT(ihandler)) ; Get ihandler address
2929 ori r20,r20,lo16(EXT(ihandler)) ; Get ihandler address
2930 b PassUp ; Go pass it up...
2931
2932 .align 5
2933
2934 PassUpFPU: lis r20,hi16(EXT(fpu_switch)) ; Get FPU switcher address
2935 ori r20,r20,lo16(EXT(fpu_switch)) ; Get FPU switcher address
2936 b PassUp ; Go pass it up...
2937
2938 .align 5
2939
; If this CPU has no Altivec, a VMX interrupt is converted into a program
; exception with the "invalid instruction" SRR1 code instead.
2940 PassUpVMX: lis r20,hi16(EXT(vec_switch)) ; Get VMX switcher address
2941 ori r20,r20,lo16(EXT(vec_switch)) ; Get VMX switcher address
2942 bt++ featAltivec,PassUp ; We have VMX on this CPU...
2943 li r11,T_PROGRAM ; Say that it is a program exception
2944 li r20,8 ; Set invalid instruction
2945 stw r11,saveexception(r13) ; Set the new exception code
2946 sth r20,savesrr1+4(r13) ; Set the invalid instruction SRR code
2947
2948 b PassUpTrap ; Go pass it up...
2949
2950 .align 5
2951
2952 PassUpAbend:
2953 lis r20,hi16(EXT(chandler)) ; Get choke handler address
2954 ori r20,r20,lo16(EXT(chandler)) ; Get choke handler address
2955 b PassUp ; Go pass it up...
2956
2957 .align 5
2958
2959 PassUp:
2960 mfsprg r29,0 ; Get the per_proc block back
2961
2962 cmplwi cr1,r11,T_INJECT_EXIT ; Are we exiting from an injection?
2963 lwz r3,ijsave(r29) ; Get the inject savearea top
2964 lwz r4,ijsave+4(r29) ; And get the bottom of the inject savearea pointer
2965 rlwinm r3,r3,0,1,0 ; Copy low 32 bits to top 32
2966 rlwimi. r3,r4,0,0,31 ; Insert low part of 64-bit address in bottom 32 bits and see if ijsave is 0
2967 beq++ notaninjct ; Skip tossing savearea if no injection...
2968
2969 beq-- cr1,nonullify ; Injection exited normally, skip the nullify merge below...
2970
; Permanent interruption during injection: nullify the injected code by
; merging the new interrupt state into the ORIGINAL (pre-inject) savearea,
; then toss the new savearea.
2971 lwz r4,savesrr1+4(r3) ; Get the interrupt modifiers from the original SRR1
2972 lwz r5,savesrr1+4(r13) ; Get the interrupt modifiers from the new SRR1
2973 lwz r6,savedar(r13) ; Get the top of the DAR
2974 rlwimi r4,r5,0,0,15 ; copy the new top to the original SRR1
2975 lwz r7,savedar+4(r13) ; Get the bottom of the DAR
2976 rlwimi r4,r5,0,MSR_FP_BIT,MSR_FP_BIT ; Copy the new FP enable bit into the old SRR1
2977 stw r4,savesrr1+4(r3) ; Save the updated SRR1
2978 lwz r5,savedsisr(r13) ; Grab the new DSISR
2979
2980 mr r4,r13 ; Save the new savearea pointer
2981 mr r13,r3 ; Point to the old savearea we are keeping
2982 stw r6,savedar(r13) ; Save top of new DAR
2983 stw r7,savedar+4(r13) ; Save bottom of new DAR
2984 stw r5,savedsisr(r13) ; Set the new DSISR
2985 stw r11,saveexception(r13) ; Set the new exception code
2986 mr r3,r4 ; Point to the new savearea in order to toss it
2987
2988 nonullify: li r0,0 ; Get a zero
2989 stw r0,ijsave(r29) ; Clear the pointer to the saved savearea
2990 stw r0,ijsave+4(r29) ; Clear the pointer to the saved savearea
2991
2992 bl EXT(save_ret_phys) ; Dump that pesky extra savearea
2993
2994 notaninjct: lwz r10,SAVflags(r13) ; Pick up the flags
2995
2996 li r0,0xFFF ; Get a page mask
2997 li r2,MASK(MSR_BE)|MASK(MSR_SE) ; Get the mask to save trace bits
2998 andc r5,r13,r0 ; Back off to the start of savearea block
2999 mfmsr r3 ; Get our MSR
3000 rlwinm r10,r10,0,SAVredriveb+1,SAVredriveb-1 ; Clear the redrive before we pass it up
3001 li r21,MSR_SUPERVISOR_INT_OFF ; Get our normal MSR value
3002 and r3,r3,r2 ; Clear all but trace
3003 lwz r5,SACvrswap+4(r5) ; Get real to virtual conversion
3004 or r21,r21,r3 ; Keep the trace bits if they are on
3005 stw r10,SAVflags(r13) ; Set the flags with the cleared redrive flag
3006
3007 xor r4,r13,r5 ; Pass up the virtual address of context savearea
3008 rlwinm r4,r4,0,0,31 ; Clean top half of virtual savearea if 64-bit
3009
3010 mr r3,r21 ; Pass in the MSR we will go to
3011 bl EXT(switchSegs) ; Go handle the segment registers/STB
3012
3013 lwz r3,saveexception(r13) ; Recall the exception code
3014
3015 mtsrr0 r20 ; Set up the handler address
3016 mtsrr1 r21 ; Set up our normal MSR value
3017
3018 bt++ pf64Bitb,puLaunch ; Handle 64-bit machine...
3019
3020 rfi ; Launch the exception handler
3021
3022 puLaunch: rfid ; Launch the exception handler
3023
3024 /*
3025 * This routine is the main place where we return from an interruption.
3026 *
3027 * This is also where we release the quickfret list. These are saveareas
3028 * that were released as part of the exception exit path in hw_exceptions.
3029 * In order to save an atomic operation (which actually will not work
3030 * properly on a 64-bit machine) we use holdQFret to indicate that the list
3031 * is in flux and should not be looked at here. This comes into play only
3032 * when we take a PTE miss when we are queuing a savearea onto qfret.
3033 * Quite rare but could happen. If the flag is set, this code does not
3034 * release the list and waits until next time.
3035 *
3036 * All we need to remember here is that R13 must point to the savearea
3037 * that has the context we need to load up. Translation and interruptions
3038 * must be disabled.
3039 *
3040 * This code always loads the context in the savearea pointed to
3041 * by R13. In the process, it throws away the savearea. If there
3042 * is any tomfoolery with savearea stacks, it must be taken care of
3043 * before we get here.
3044 *
3045 */
3046
3047 .align 5
3048
; Common interrupt-return entry.  r13 = savearea with the context to load.
; First, if the hold-off flag is clear, drain the quickfret list of
; saveareas released by hw_exceptions.
3049 EatRupt: mfsprg r29,0 ; Get the per_proc block back
3050 mr r31,r13 ; Move the savearea pointer to the far end of the register set
3051 mfsprg r27,2 ; Get the processor features
3052
3053 lwz r3,holdQFret(r29) ; Get the release hold off flag
3054
3055 bt++ pf64Bitb,eat64a ; Skip down to the 64-bit version of this
3056
3057 ;
3058 ; This starts the 32-bit version
3059 ;
3060
3061 mr. r3,r3 ; Should we hold off the quick release?
3062 lwz r30,quickfret+4(r29) ; Pick up the quick fret list, if any
3063 la r21,saver0(r31) ; Point to the first thing we restore
3064 bne- ernoqfret ; Hold off set, do not release just now...
3065
3066 erchkfret: mr. r3,r30 ; Any savearea to quickly release?
3067 beq+ ernoqfret ; No quickfrets...
3068 lwz r30,SAVprev+4(r30) ; Chain back now
3069
3070 bl EXT(save_ret_phys) ; Put it on the free list
3071 stw r30,quickfret+4(r29) ; Dequeue previous guy (really, it is ok to wait until after the release)
3072 b erchkfret ; Try the next one...
3073
3074 .align 5
3075
3076 ernoqfret:
3077 lwz r30,SAVflags(r31) ; Pick up the flags
3078 lis r0,hi16(SAVinject) ; Get inject flag
3079 dcbt 0,r21 ; Touch in the first thing we need
3080
3081 ;
3082 ; Here we release the savearea.
3083 ;
3084 ; Important!!!! The savearea is released before we are done with it. When the
3085 ; local free savearea list (anchored at lclfree) gets too long, save_ret_phys
3086 ; will trim the list, making the extra saveareas allocatable by another processor
3087 ; The code in there must ALWAYS leave our savearea on the local list, otherwise
3088 ; we could be very, very unhappy. The code there always queues the "just released"
3089 ; savearea to the head of the local list. Then, if it needs to trim, it will
3090 ; start with the SECOND savearea, leaving ours intact.
3091 ;
3092 ; If we are going to inject code here, we must not toss the savearea because
3093 ; we will continue to use it. The code stream to inject is in it and we
3094 ; use it to hold the pre-inject context so that we can merge that with the
3095 ; post-inject context. The field ijsave in the per-proc is used to point to the savearea.
3096 ;
3097 ; Note that we will NEVER pass an interrupt up without first dealing with this savearea.
3098 ;
3099 ; All permanent interruptions (i.e., not denorm, alignment, or handled page and segment faults)
3100 ; will nullify any injected code and pass the interrupt up in the original savearea. A normal
3101 ; inject completion will merge the original context into the new savearea and pass that up.
3102 ;
3103 ; Note that the following code which sets up the injection will only be executed when
3104 ; SAVinject is set. That means that it will not run if we are returning from an alignment
3105 ; or denorm exception, or from a handled page or segment fault.
3106 ;
3107
3108 andc r0,r30,r0 ; Clear the inject flag
3109 cmplw cr4,r0,r30 ; Remember if we need to inject
3110 mr r3,r31 ; Get the exiting savearea in parm register
3111 beq+ cr4,noinject ; No, we are not going to inject instructions...
3112
3113 stw r0,SAVflags(r31) ; Yes we are, clear the request...
3114
; Copy the instruction stream out of the savearea into this CPU's slot in
; the ijcode buffer (64 bytes per CPU), append the magic exit trap, then
; flush/invalidate the two cache lines so the copied code is fetchable.
3115 lhz r26,PP_CPU_NUMBER(r29) ; Get the cpu number
3116 lwz r25,saveinstr(r31) ; Get the instruction count
3117 la r3,saveinstr+4(r31) ; Point to the instruction stream
3118 slwi r26,r26,6 ; Get offset to the inject code stream for this processor
3119 li r5,0 ; Get the current instruction offset
3120 ori r26,r26,lo16(EXT(ijcode)) ; Get the base of the inject buffer for this processor (always < 64K)
3121 slwi r25,r25,2 ; Multiply by 4
3122
3123 injctit: lwzx r6,r5,r3 ; Pick up the instruction
3124 stwx r6,r5,r26 ; Inject into code buffer
3125 addi r5,r5,4 ; Bump offset
3126 cmplw r5,r25 ; Have we hit the end?
3127 blt- injctit ; Continue until we have copied all...
3128
3129 lis r3,0x0FFF ; Build our magic trap
3130 ori r3,r3,0xC9C9 ; Build our magic trap
3131 stw r31,ijsave+4(r29) ; Save the original savearea for injection
3132 stwx r3,r5,r26 ; Save the magic trap
3133
3134 li r3,32 ; Get cache line size
3135 dcbf 0,r26 ; Flush first line
3136 dcbf r3,r26 ; And the second
3137 sync ; Hang on until it's done
3138
3139 icbi 0,r26 ; Flush instructions in the first line
3140 icbi r3,r26 ; And the second
3141 isync ; Throw anything stale away
3142 sync ; Hang on until it's done
3143 b injected ; Skip the savearea release...
3144
; Release (or keep, if injecting) the savearea, then restore the full
; 32-bit register context from it and rfi back to the interrupted code.
; cr4 still remembers whether we are injecting (set at ernoqfret).
3145 noinject: bl EXT(save_ret_phys) ; Put old savearea on the free list
3146
3147 injected: lwz r3,savesrr1+4(r31) ; Pass in the MSR we are going to
3148 bl EXT(switchSegs) ; Go handle the segment registers/STB
3149
3150 li r3,savesrr1+4 ; Get offset to the srr1 value
3151 lwarx r8,r3,r31 ; Get destination MSR and take reservation along the way (just so we can blow it away)
3152 cmplw cr3,r14,r14 ; Set that we do not need to stop streams
3153
3154 li r21,emfp0 ; Point to the fp savearea
3155 stwcx. r8,r3,r31 ; Blow away any reservations we hold
3156
3157 lwz r25,savesrr0+4(r31) ; Get the SRR0 to use
3158
3159 la r28,saver4(r31) ; Point to the 32-byte line with r4-r7
3160 dcbz r21,r29 ; Clear a work area
3161 lwz r0,saver0+4(r31) ; Restore R0
3162 dcbt 0,r28 ; Touch in r4-r7
3163 lwz r1,saver1+4(r31) ; Restore R1
3164
3165 beq+ cr4,noinject2 ; No code injection here...
3166
3167 ;
3168 ; If we are injecting, we need to stay in supervisor state with instruction
3169 ; address translation off. We also need to have as few potential interruptions as
3170 ; possible. Therefore, we turn off external interruptions and tracing (which doesn't
3171 ; make much sense anyway).
3172 ;
3173 ori r8,r8,lo16(ijemoff) ; Force the need-to-be-off bits on
3174 mr r25,r26 ; Get the injected code address
3175 xori r8,r8,lo16(ijemoff) ; Turn off all of the need-to-be-off bits
3176
3177 noinject2: lwz r2,saver2+4(r31) ; Restore R2
3178 la r28,saver8(r31) ; Point to the 32-byte line with r8-r11
3179 lwz r3,saver3+4(r31) ; Restore R3
3180 andis. r6,r27,hi16(pfAltivec) ; Do we have altivec on the machine?
3181 dcbt 0,r28 ; touch in r8-r11
3182 lwz r4,saver4+4(r31) ; Restore R4
3183 la r28,saver12(r31) ; Point to the 32-byte line with r12-r15
3184 mtsrr0 r25 ; Restore the SRR0 now
3185 lwz r5,saver5+4(r31) ; Restore R5
3186 mtsrr1 r8 ; Restore the SRR1 now
3187 lwz r6,saver6+4(r31) ; Restore R6
3188
3189 dcbt 0,r28 ; touch in r12-r15
3190 la r28,saver16(r31)
3191
3192 lwz r7,saver7+4(r31) ; Restore R7
3193 lwz r8,saver8+4(r31) ; Restore R8
3194 lwz r9,saver9+4(r31) ; Restore R9
3195
3196 dcbt 0,r28 ; touch in r16-r19
3197 la r28,saver20(r31)
3198
3199 lwz r10,saver10+4(r31) ; Restore R10
3200 lwz r11,saver11+4(r31) ; Restore R11
3201
3202 dcbt 0,r28 ; touch in r20-r23
3203 la r28,savevscr(r31) ; Point to the status area
3204
3205 lwz r12,saver12+4(r31) ; Restore R12
3206 lwz r13,saver13+4(r31) ; Restore R13
3207
3208 la r14,savectr+4(r31)
3209 dcbt 0,r28 ; Touch in VSCR and FPSCR
3210 dcbt 0,r14 ; touch in CTR, DAR, DSISR, VRSAVE, and Exception code
3211
3212 lwz r26,next_savearea+4(r29) ; Get the exception save area
3213 la r28,saver24(r31)
3214
3215 lwz r14,saver14+4(r31) ; Restore R14
3216 lwz r15,saver15+4(r31) ; Restore R15
3217
3218
3219 stfd f0,emfp0(r29) ; Save FP0
3220 lwz r27,savevrsave(r31) ; Get the vrsave
3221 dcbt 0,r28 ; touch in r24-r27
3222 la r28,savevscr(r31) ; Point to the status area
3223 lfd f0,savefpscrpad(r31) ; Get the fpscr
3224 la r22,saver28(r31)
3225 mtfsf 0xFF,f0 ; Restore fpscr
3226 lfd f0,emfp0(r29) ; Restore the used register
3227
3228 beq noavec3 ; No Altivec on this CPU...
3229
3230 stvxl v0,r21,r29 ; Save a vector register
3231 lvxl v0,0,r28 ; Get the vector status
3232 mtspr vrsave,r27 ; Set the vrsave
3233 mtvscr v0 ; Set the vector status
3234 lvxl v0,r21,r29 ; Restore work vector register
3235
3236 noavec3: dcbt 0,r22 ; touch in r28-r31
3237
3238 lwz r23,spcFlags(r29) ; Get the special flags from per_proc
3239 la r17,savesrr0(r31)
3240 la r26,saver0(r26) ; Point to the first part of the next savearea
3241 dcbt 0,r17 ; touch in SRR0, SRR1, CR, XER, LR
3242 lhz r28,pfrptdProc(r29) ; Get the reported processor type
3243
3244 lwz r16,saver16+4(r31) ; Restore R16
3245 lwz r17,saver17+4(r31) ; Restore R17
3246 lwz r18,saver18+4(r31) ; Restore R18
3247 lwz r19,saver19+4(r31) ; Restore R19
3248 lwz r20,saver20+4(r31) ; Restore R20
3249 lwz r21,saver21+4(r31) ; Restore R21
3250 lwz r22,saver22+4(r31) ; Restore R22
3251
3252 cmpwi cr1,r28,CPU_SUBTYPE_POWERPC_750 ; G3?
3253
3254 dcbz 0,r26 ; Clear and allocate next savearea we use in the off chance it is still in when we next interrupt
3255
3256 andis. r23,r23,hi16(perfMonitor) ; Is the performance monitor enabled?
3257 lwz r23,saver23+4(r31) ; Restore R23
3258 cmpwi cr2,r28,CPU_SUBTYPE_POWERPC_7400 ; Yer standard G4?
3259 lwz r24,saver24+4(r31) ; Restore R24
3260 lwz r25,saver25+4(r31) ; Restore R25
3261 lwz r26,saver26+4(r31) ; Restore R26
3262 lwz r27,saver27+4(r31) ; Restore R27
3263
3264 beq+ noPerfMonRestore32 ; No perf monitor...
3265
; Perf-monitor restore is tiered by CPU type: the full path restores
; PMC5/6, then falls through the 7400 path (MMCR2) and the 750 path
; (PMC1-4, MMCR0/1); the 7400 entry skips PMC5/6; the 750 entry skips both.
3266 beq- cr1,perfMonRestore32_750 ; This is a G3...
3267 beq- cr2,perfMonRestore32_7400 ; Standard G4...
3268
3269 lwz r28,savepmc+16(r31)
3270 lwz r29,savepmc+20(r31)
3271 mtspr pmc5,r28 ; Restore PMC5
3272 mtspr pmc6,r29 ; Restore PMC6
3273
3274 perfMonRestore32_7400:
3275 lwz r28,savemmcr2+4(r31)
3276 mtspr mmcr2,r28 ; Restore MMCR2
3277
3278 perfMonRestore32_750:
3279 lwz r28,savepmc+0(r31)
3280 lwz r29,savepmc+4(r31)
3281 mtspr pmc1,r28 ; Restore PMC1
3282 mtspr pmc2,r29 ; Restore PMC2
3283 lwz r28,savepmc+8(r31)
3284 lwz r29,savepmc+12(r31)
3285 mtspr pmc3,r28 ; Restore PMC3
3286 mtspr pmc4,r29 ; Restore PMC4
3287 lwz r28,savemmcr1+4(r31)
3288 lwz r29,savemmcr0+4(r31)
3289 mtspr mmcr1,r28 ; Restore MMCR1
3290 mtspr mmcr0,r29 ; Restore MMCR0
3291
3292 noPerfMonRestore32:
3293 lwz r28,savecr(r31) ; Get CR to restore
3294 lwz r29,savexer+4(r31) ; Get XER to restore
3295 mtcr r28 ; Restore the CR
3296 lwz r28,savelr+4(r31) ; Get LR to restore
3297 mtxer r29 ; Restore the XER
3298 lwz r29,savectr+4(r31) ; Get the CTR to restore
3299 mtlr r28 ; Restore the LR
3300 lwz r28,saver30+4(r31) ; Get R30
3301 mtctr r29 ; Restore the CTR
3302 lwz r29,saver31+4(r31) ; Get R31
; R30/R31 are parked in SPRG2/SPRG3 so r31 can keep pointing at the
; savearea while r28/r29 are restored; SPRG2 is then reloaded with the
; feature flags, its normal steady-state content.
3303 mtsprg 2,r28 ; Save R30 for later
3304 lwz r28,saver28+4(r31) ; Restore R28
3305 mtsprg 3,r29 ; Save R31 for later
3306 lwz r29,saver29+4(r31) ; Restore R29
3307
3308 mfsprg r31,0 ; Get per_proc
3309 mfsprg r30,2 ; Restore R30
3310 lwz r31,pfAvailable(r31) ; Get the feature flags
3311 mtsprg 2,r31 ; Set the feature flags
3312 mfsprg r31,3 ; Restore R31
3313
3314 rfi ; Click heels three times and think very hard that there is no place like home...
3315
3316 .long 0 ; Leave this here
3317 .long 0
3318 .long 0
3319 .long 0
3320 .long 0
3321 .long 0
3322 .long 0
3323 .long 0
3324
3325
3326 ;
3327 ; This starts the 64-bit version
3328 ;
3329
3330 .align 7
3331
3332 eat64a: ld r30,quickfret(r29) ; Pick up the quick fret list, if any
3333
3334 mr. r3,r3 ; Should we hold off the quick release?
3335 la r21,saver0(r31) ; Point to the first thing we restore
3336 bne-- ernoqfre64 ; Hold off set, do not release just now...
3337
3338 erchkfre64: mr. r3,r30 ; Any savearea to quickly release?
3339 beq+ ernoqfre64 ; No quickfrets...
3340 ld r30,SAVprev(r30) ; Chain back now
3341
3342 bl EXT(save_ret_phys) ; Put it on the free list
3343
3344 std r30,quickfret(r29) ; Dequeue previous guy (really, it is ok to wait until after the release)
3345 b erchkfre64 ; Try the next one...
3346
3347 .align 7
3348
3349 ernoqfre64: lwz r30,SAVflags(r31) ; Pick up the flags
3350 lis r0,hi16(SAVinject) ; Get inject flag
3351 dcbt 0,r21 ; Touch in the first thing we need
3352
3353 ;
3354 ; Here we release the savearea.
3355 ;
3356 ; Important!!!! The savearea is released before we are done with it. When the
3357 ; local free savearea list (anchored at lclfree) gets too long, save_ret_phys
3358 ; will trim the list, making the extra saveareas allocatable by another processor
3359 ; The code in there must ALWAYS leave our savearea on the local list, otherwise
3360 ; we could be very, very unhappy. The code there always queues the "just released"
3361 ; savearea to the head of the local list. Then, if it needs to trim, it will
3362 ; start with the SECOND savearea, leaving ours intact.
3363 ;
3364 ; If we are going to inject code here, we must not toss the savearea because
3365 ; we will continue to use it. The code stream to inject is in it and we
3366 ; use it to hold the pre-inject context so that we can merge that with the
3367 ; post-inject context. The field ijsave in the per-proc is used to point to the savearea.
3368 ;
3369 ; Note that we will NEVER pass an interrupt up without first dealing with this savearea.
3370 ;
3371 ; All permanent interruptions (i.e., not denorm, alignment, or handled page and segment faults)
3372 ; will nullify any injected code and pass the interrupt up in the original savearea. A normal
3373 ; inject completion will merge the original context into the new savearea and pass that up.
3374 ;
3375 ; Note that the following code which sets up the injection will only be executed when
3376 ; SAVinject is set. That means that if will not run if we are returning from an alignment
3377 ; or denorm exception, or from a handled page or segment fault.
3378 ;
3379
3380
3381 li r3,lgKillResv ; Get spot to kill reservation
3382 andc r0,r30,r0 ; Clear the inject flag
3383 stdcx. r3,0,r3 ; Blow away any reservations we hold
3384 cmplw cr4,r0,r30 ; Remember if we need to inject
3385 mr r3,r31 ; Get the exiting savearea in parm register
3386 beq++ cr4,noinject3 ; No, we are not going to inject instructions...
3387
3388 stw r0,SAVflags(r31) ; Yes we are, clear the request...
3389
3390 lhz r26,PP_CPU_NUMBER(r29) ; Get the cpu number
3391 lwz r25,saveinstr(r31) ; Get the instruction count
3392 la r3,saveinstr+4(r31) ; Point to the instruction stream
3393 slwi r26,r26,6 ; Get offset to the inject code stream for this processor
3394 li r5,0 ; Get the current instruction offset
3395 ori r26,r26,lo16(EXT(ijcode)) ; Get the base of the inject buffer for this processor (always < 64K)
3396 slwi r25,r25,2 ; Multiply by 4
3397
3398 injctit2: lwzx r6,r5,r3 ; Pick up the instruction
3399 stwx r6,r5,r26 ; Inject into code buffer
3400 addi r5,r5,4 ; Bump offset
3401 cmplw r5,r25 ; Have we hit the end?
3402 blt-- injctit2 ; Continue until we have copied all...
3403
3404 lis r3,0x0FFF ; Build our magic trap
3405 ori r3,r3,0xC9C9 ; Build our magic trap
3406 std r31,ijsave(r29) ; Save the original savearea for injection
3407 stwx r3,r5,r26 ; Save the magic trap
3408
3409 dcbf 0,r26 ; Flush the line
3410 sync ; Hang on until it's done
3411
3412 icbi 0,r26 ; Flush instructions in the line
3413 isync ; Throw anything stale away
3414 sync ; Hang on until it's done
3415 b injected2 ; Skip the savearea release...
3416
3417 noinject3: bl EXT(save_ret_phys) ; Put it on the free list
3418
3419 injected2: lwz r3,savesrr1+4(r31) ; Pass in the MSR we will be going to
3420 bl EXT(switchSegs) ; Go handle the segment registers/STB
3421
3422 ld r8,savesrr1(r31) ; Get destination MSR
3423 cmplw cr3,r14,r14 ; Set that we do not need to stop streams
3424 li r21,emfp0 ; Point to a workarea
3425
3426 ld r25,savesrr0(r31) ; Get the SRR0 to use
3427 la r28,saver16(r31) ; Point to the 128-byte line with r16-r31
3428 dcbz128 r21,r29 ; Clear a work area
3429 ld r0,saver0(r31) ; Restore R0
3430 dcbt 0,r28 ; Touch in r16-r31
3431 ld r1,saver1(r31) ; Restore R1
3432
3433 beq++ cr4,noinject4 ; No code injection here...
3434
3435 ;
3436 ; If we are injecting, we need to stay in supervisor state with instruction
3437 ; address translation off. We also need to have as few potential interruptions as
3438 ; possible. Therefore, we turn off external interruptions and tracing (which doesn't
3439 ; make much sense anyway).
3440 ;
3441 ori r8,r8,lo16(ijemoff) ; Force the need-to-be-off bits on
3442 mr r25,r26 ; Point pc to injection code buffer
3443 xori r8,r8,lo16(ijemoff) ; Turn off all of the need-to-be-off bits
3444
3445 noinject4: ld r2,saver2(r31) ; Restore R2
3446 ld r3,saver3(r31) ; Restore R3
3447 mtcrf 0x80,r27 ; Get facility availability flags (do not touch CR1-7)
3448 ld r4,saver4(r31) ; Restore R4
3449 mtsrr0 r25 ; Restore the SRR0 now
3450 ld r5,saver5(r31) ; Restore R5
3451 mtsrr1 r8 ; Restore the SRR1 now
3452 ld r6,saver6(r31) ; Restore R6
3453
3454 ld r7,saver7(r31) ; Restore R7
3455 ld r8,saver8(r31) ; Restore R8
3456 ld r9,saver9(r31) ; Restore R9
3457
3458 la r28,savevscr(r31) ; Point to the status area
3459
3460 ld r10,saver10(r31) ; Restore R10
3461 ld r11,saver11(r31) ; Restore R11
3462 ld r12,saver12(r31) ; Restore R12
3463 ld r13,saver13(r31) ; Restore R13
3464
3465 ld r26,next_savearea(r29) ; Get the exception save area
3466
3467 ld r14,saver14(r31) ; Restore R14
3468 ld r15,saver15(r31) ; Restore R15
3469 lwz r27,savevrsave(r31) ; Get the vrsave
3470
3471 bf-- pfAltivecb,noavec2s ; Skip if no VMX...
3472
3473 stvxl v0,r21,r29 ; Save a vector register
3474 lvxl v0,0,r28 ; Get the vector status
3475 mtvscr v0 ; Set the vector status
3476
3477 lvxl v0,r21,r29 ; Restore work vector register
3478
3479 noavec2s: mtspr vrsave,r27 ; Set the vrsave
3480
3481 lwz r28,saveexception(r31) ; Get exception type
3482 stfd f0,emfp0(r29) ; Save FP0
3483 lfd f0,savefpscrpad(r31) ; Get the fpscr
3484 mtfsf 0xFF,f0 ; Restore fpscr
3485 lfd f0,emfp0(r29) ; Restore the used register
3486 ld r16,saver16(r31) ; Restore R16
3487 lwz r30,spcFlags(r29) ; Get the special flags from per_proc
3488 ld r17,saver17(r31) ; Restore R17
3489 ld r18,saver18(r31) ; Restore R18
3490 cmplwi cr1,r28,T_RESET ; Are we returning from a reset?
3491 ld r19,saver19(r31) ; Restore R19
3492 ld r20,saver20(r31) ; Restore R20
3493 li r27,0 ; Get a zero
3494 ld r21,saver21(r31) ; Restore R21
3495 la r26,saver0(r26) ; Point to the first part of the next savearea
3496 andis. r30,r30,hi16(perfMonitor) ; Is the performance monitor enabled?
3497 ld r22,saver22(r31) ; Restore R22
3498 ld r23,saver23(r31) ; Restore R23
3499 bne++ cr1,er64rrst ; We are not returning from a reset...
3500 stw r27,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_TYPE)(br0) ; Allow resets again
3501
3502 er64rrst: ld r24,saver24(r31) ; Restore R24
3503
3504 dcbz128 0,r26 ; Clear and allocate next savearea we use in the off chance it is still in when we next interrupt
3505
3506 ld r25,saver25(r31) ; Restore R25
3507 ld r26,saver26(r31) ; Restore R26
3508 ld r27,saver27(r31) ; Restore R27
3509
3510 beq++ noPerfMonRestore64 ; Nope...
3511
3512 lwz r28,savepmc+0(r31)
3513 lwz r29,savepmc+4(r31)
3514 mtspr pmc1_gp,r28 ; Restore PMC1
3515 mtspr pmc2_gp,r29 ; Restore PMC2
3516 lwz r28,savepmc+8(r31)
3517 lwz r29,savepmc+12(r31)
3518 mtspr pmc3_gp,r28 ; Restore PMC3
3519 mtspr pmc4_gp,r29 ; Restore PMC4
3520 lwz r28,savepmc+16(r31)
3521 lwz r29,savepmc+20(r31)
3522 mtspr pmc5_gp,r28 ; Restore PMC5
3523 mtspr pmc6_gp,r29 ; Restore PMC6
3524 lwz r28,savepmc+24(r31)
3525 lwz r29,savepmc+28(r31)
3526 mtspr pmc7_gp,r28 ; Restore PMC7
3527 mtspr pmc8_gp,r29 ; Restore PMC8
3528 ld r28,savemmcr1(r31)
3529 ld r29,savemmcr2(r31)
3530 mtspr mmcr1_gp,r28 ; Restore MMCR1
3531 mtspr mmcra_gp,r29 ; Restore MMCRA
3532 ld r28,savemmcr0(r31)
3533
3534 mtspr mmcr0_gp,r28 ; Restore MMCR0
3535
3536 noPerfMonRestore64:
3537 mfsprg r30,0 ; Get per_proc
3538 lwz r28,savecr(r31) ; Get CR to restore
3539 ld r29,savexer(r31) ; Get XER to restore
3540 mtcr r28 ; Restore the CR
3541 ld r28,savelr(r31) ; Get LR to restore
3542 mtxer r29 ; Restore the XER
3543 ld r29,savectr(r31) ; Get the CTR to restore
3544 mtlr r28 ; Restore the LR
3545 ld r28,saver30(r31) ; Get R30
3546 mtctr r29 ; Restore the CTR
3547 ld r29,saver31(r31) ; Get R31
3548 mtspr hsprg0,r28 ; Save R30 for later
3549 ld r28,saver28(r31) ; Restore R28
3550 mtsprg 3,r29 ; Save R31 for later
3551 ld r29,saver29(r31) ; Restore R29
3552
3553 lwz r31,pfAvailable(r30) ; Get the feature flags
3554 ld r30,UAW(r30) ; Get the User Assist DoubleWord
3555 mtsprg 2,r31 ; Set the feature flags
3556 mfsprg r31,3 ; Restore R31
3557 mtsprg 3,r30 ; Set the UAW
3558 mfspr r30,hsprg0 ; Restore R30
3559
3560 rfid ; Click heels three times and think very hard that there is no place like home...
3561
3562
3563
3564 /*
3565 * exception_exit(savearea *)
3566 *
3567 *
3568 * ENTRY : IR and/or DR and/or interruptions can be on
3569 * R3 points to the virtual address of a savearea
 *
 * Returns from an exception using the state in the supplied savearea.
 * Flow:
 *   1) Load the per-processor feature flags from SPRG2 into CR bits.
 *   2) Build an MSR in r30 with translation and interrupts off, keeping
 *      VEC, FP, and ME (machine checks stay enabled).
 *   3) Switch to that MSR: 64-bit parts take eeSixtyFour (mtmsrd, with
 *      MSR[SF] set); parts flagged pfNoMSRirb use the loadMSR "sc"
 *      service; everything else uses a plain mtmsr.
 *   4) Convert the savearea virtual address to physical by XORing with
 *      the SACvrswap value from the savearea block header (r4).
 *   5) If SAVflags has SAVredrive set, refresh the savearea timestamp
 *      and branch to Redrive to re-dispatch the exception; otherwise
 *      branch to EatRupt to release the savearea and resume.
 * Redrive and EatRupt live elsewhere in this file, outside this excerpt.
3570 */
3571 
3572 .align 5
3573 .globl EXT(exception_exit)
3574 
3575 LEXT(exception_exit)
3576 
3577 mfsprg r29,2 ; Get feature flags
3578 mr r31,r3 ; Get the savearea in the right register
3579 mtcrf 0x04,r29 ; Set the features
3580 li r0,1 ; Get this just in case
3581 mtcrf 0x02,r29 ; Set the features
3582 lis r30,hi16(MASK(MSR_VEC)|MASK(MSR_FP)|MASK(MSR_ME)) ; Set up the MSR we will use throughout. Note that ME come on here if MCK
3583 rlwinm r4,r3,0,0,19 ; Round down to savearea block base
3584 lis r1,hi16(SAVredrive) ; Get redrive request
3585 mfsprg r2,0 ; Get the per_proc block
3586 ori r30,r30,lo16(MASK(MSR_VEC)|MASK(MSR_FP)|MASK(MSR_ME)) ; Rest of MSR
3587 bt++ pf64Bitb,eeSixtyFour ; We are 64-bit...
3588 
3589 lwz r4,SACvrswap+4(r4) ; Get the virtual to real translation
3590 
3591 bt pfNoMSRirb,eeNoMSR ; No MSR...
3592 
3593 mtmsr r30 ; Translation and all off
3594 isync ; Toss prefetch
3595 b eeNoMSRx
3596 
3597 .align 5
3598 
; 64-bit path: use the full doubleword of SACvrswap and mtmsrd, and set
; MSR[SF] (r0 holds the 1 loaded above) so we stay in 64-bit mode.
3599 eeSixtyFour:
3600 ld r4,SACvrswap(r4) ; Get the virtual to real translation
3601 rldimi r30,r0,63,MSR_SF_BIT ; Set SF bit (bit 0)
3602 mtmsrd r30 ; Set 64-bit mode, turn off EE, DR, and IR
3603 isync ; Toss prefetch
3604 b eeNoMSRx
3605 
3606 .align 5
3607 
; Chips that cannot flip IR/DR with mtmsr: ask the loadMSR system call
; service to install the new MSR for us.
3608 eeNoMSR: li r0,loadMSR ; Get the MSR setter SC
3609 mr r3,r30 ; Get new MSR
3610 sc ; Set it
3611 
3612 eeNoMSRx: xor r31,r31,r4 ; Convert the savearea to physical addressing
3613 lwz r4,SAVflags(r31) ; Pick up the flags
3614 mr r13,r31 ; Put savearea here also
3615 
3616 and. r0,r4,r1 ; Check if redrive requested
3617 
3618 dcbt br0,r2 ; We will need this in just a sec
3619 
3620 beq+ EatRupt ; No redrive, just exit...
3621 
; Redrive requested: take a fresh, coherent 64-bit timebase read
; (re-read TBU until it is stable across the TBL read).
3622 0: mftbu r2 ; Avoid using an obsolete timestamp for the redrive
3623 mftb r4 ; Read timebase low
3624 mftbu r0 ; Re-read timebase high
3625 cmplw r0,r2 ; Did the upper half tick during the read?
3626 bne-- 0b ; Yes, read it all again...
3627 
3628 stw r2,SAVtime(r13) ; Refresh savearea timestamp (high word)
3629 stw r4,SAVtime+4(r13) ; Refresh savearea timestamp (low word)
3630 
3631 lwz r11,saveexception(r13) ; Restore exception code
3632 b Redrive ; Redrive the exception...
3633
3634
3635
3636 .align 12 ; Force page alignment
3637 
3638 .globl EXT(ExceptionVectorsEnd)
; Page-aligned end marker for the exception vector code.  The region
; length (ExceptionVectorsEnd - ExceptionVectorsStart) is published as
; the data symbol exception_end at the bottom of this file.
3639 EXT(ExceptionVectorsEnd): /* Used if relocating the exception vectors */
3640
3641
3642
3643
3644 ;
3645 ; Here is where we keep the low memory globals
3646 ;
; These sit at fixed offsets within this section (the hex values in the
; comments, e.g. 5000) so they can be found at known low-memory
; addresses without relocation.
3647 
3648 . = 0x5000
3649 
3650 .ascii "Hagfish " ; 5000 Unique eyecatcher
3651 .long 0 ; 5008 Zero
3652 .long 0 ; 500C Zero cont...
3653 .long EXT(PerProcTable) ; 5010 pointer to per_proc_entry table
3654 .long 0 ; 5014 Zero
3655 
3656 .globl EXT(mckFlags)
3657 EXT(mckFlags):
3658 .long 0 ; 5018 Machine check flags
3659 
3660 .long EXT(version) ; 501C Pointer to kernel version string
3661 .long 0 ; 5020 physical memory window virtual address
3662 .long 0 ; 5024 physical memory window virtual address
3663 .long 0 ; 5028 user memory window virtual address
3664 .long 0 ; 502C user memory window virtual address
3665 .long 0 ; 5030 VMM boot-args forced feature flags
3666 
3667 .globl EXT(maxDec)
3668 EXT(maxDec):
; Largest value loaded into the decrementer (0x7FFFFFFF = max positive
; 32-bit value); runtime code elsewhere may lower this.
3669 .long 0x7FFFFFFF ; 5034 maximum decrementer value
3670
3671
3672 .globl EXT(pmsCtlp)
; Storage only; filled in at runtime by the power management stepper
; code — this file just reserves the word at a fixed offset.
3673 EXT(pmsCtlp):
3674 .long 0 ; 5038 Pointer to power management stepper control
3675
3676 .long 0 ; 503C reserved
3677 .long 0 ; 5040 reserved
3678 .long 0 ; 5044 reserved
3679 .long 0 ; 5048 reserved
3680 .long 0 ; 504C reserved
3681 .long 0 ; 5050 reserved
3682 .long 0 ; 5054 reserved
3683 .long 0 ; 5058 reserved
3684 .long 0 ; 505C reserved
3685 .long 0 ; 5060 reserved
3686 .long 0 ; 5064 reserved
3687 .long 0 ; 5068 reserved
3688 .long 0 ; 506C reserved
3689 .long 0 ; 5070 reserved
3690 .long 0 ; 5074 reserved
3691 .long 0 ; 5078 reserved
3692 .long 0 ; 507C reserved
3693
3694 .globl EXT(trcWork)
; Low-level trace facility control area (fixed offsets 5080-509C).
; The enable mask at 5084 defaults to all-on for DEBUG kernels and
; all-off otherwise; the remaining words are filled in at runtime.
3695 EXT(trcWork):
3696 .long 0 ; 5080 The next trace entry to use
3697 #if DEBUG
3698 .long 0xFFFFFFFF ; 5084 All enabled
3699 #else
3700 .long 0x00000000 ; 5084 All disabled on non-debug systems
3701 #endif
3702 .long 0 ; 5088 Start of the trace table
3703 .long 0 ; 508C End (wrap point) of the trace
3704 .long 0 ; 5090 Saved mask while in debugger
3705 .long 0 ; 5094 Size of trace table (1 - 256 pages)
3706 .long 0 ; 5098 traceGas[0]
3707 .long 0 ; 509C traceGas[1]
3708
3709 .long 0 ; 50A0 reserved
3710 .long 0 ; 50A4 reserved
3711 .long 0 ; 50A8 reserved
3712 .long 0 ; 50AC reserved
3713 .long 0 ; 50B0 reserved
3714 .long 0 ; 50B4 reserved
3715 .long 0 ; 50B8 reserved
3716 .long 0 ; 50BC reserved
3717 .long 0 ; 50C0 reserved
3718 .long 0 ; 50C4 reserved
3719 .long 0 ; 50C8 reserved
3720 .long 0 ; 50CC reserved
3721 .long 0 ; 50D0 reserved
3722 .long 0 ; 50D4 reserved
3723 .long 0 ; 50D8 reserved
3724 .long 0 ; 50DC reserved
3725 .long 0 ; 50E0 reserved
3726 .long 0 ; 50E4 reserved
3727 .long 0 ; 50E8 reserved
3728 .long 0 ; 50EC reserved
3729 .long 0 ; 50F0 reserved
3730 .long 0 ; 50F4 reserved
3731 .long 0 ; 50F8 reserved
3732 .long 0 ; 50FC reserved
3733
3734 .globl EXT(saveanchor)
3735 
; Savearea control anchor at fixed offset 5100.  SVsize bytes are
; reserved here; SVsize comes from the assembled structure offsets
; (assym.s) — presumably matching the savearea control structure in
; savearea.h, TODO confirm.
3736 EXT(saveanchor): ; 5100 saveanchor
3737 .set .,.+SVsize
3738
3739 .long 0 ; 5140 reserved
3740 .long 0 ; 5144 reserved
3741 .long 0 ; 5148 reserved
3742 .long 0 ; 514C reserved
3743 .long 0 ; 5150 reserved
3744 .long 0 ; 5154 reserved
3745 .long 0 ; 5158 reserved
3746 .long 0 ; 515C reserved
3747 .long 0 ; 5160 reserved
3748 .long 0 ; 5164 reserved
3749 .long 0 ; 5168 reserved
3750 .long 0 ; 516C reserved
3751 .long 0 ; 5170 reserved
3752 .long 0 ; 5174 reserved
3753 .long 0 ; 5178 reserved
3754 .long 0 ; 517C reserved
3755
; tlbieLock (5180): storage for a lock word; judging by the name it
; serializes tlbie/tlbsync sequences across processors — the users are
; elsewhere in the kernel, TODO confirm.  Only the word is defined here.
3756 .long 0 ; 5180 tlbieLock
3757
3758 .long 0 ; 5184 reserved
3759 .long 0 ; 5188 reserved
3760 .long 0 ; 518C reserved
3761 .long 0 ; 5190 reserved
3762 .long 0 ; 5194 reserved
3763 .long 0 ; 5198 reserved
3764 .long 0 ; 519C reserved
3765 .long 0 ; 51A0 reserved
3766 .long 0 ; 51A4 reserved
3767 .long 0 ; 51A8 reserved
3768 .long 0 ; 51AC reserved
3769 .long 0 ; 51B0 reserved
3770 .long 0 ; 51B4 reserved
3771 .long 0 ; 51B8 reserved
3772 .long 0 ; 51BC reserved
3773 .long 0 ; 51C0 reserved
3774 .long 0 ; 51C4 reserved
3775 .long 0 ; 51C8 reserved
3776 .long 0 ; 51CC reserved
3777 .long 0 ; 51D0 reserved
3778 .long 0 ; 51D4 reserved
3779 .long 0 ; 51D8 reserved
3780 .long 0 ; 51DC reserved
3781 .long 0 ; 51E0 reserved
3782 .long 0 ; 51E4 reserved
3783 .long 0 ; 51E8 reserved
3784 .long 0 ; 51EC reserved
3785 .long 0 ; 51F0 reserved
3786 .long 0 ; 51F4 reserved
3787 .long 0 ; 51F8 reserved
3788 .long 0 ; 51FC reserved
3789
3790 .globl EXT(dgWork)
3791 
; Diagnostic work area at fixed offsets 5200-521C: a lock word, a flags
; word, and six miscellaneous words.  All are zero-initialized here and
; maintained by diagnostic code elsewhere.
3792 EXT(dgWork):
3793 .long 0 ; 5200 dgLock
3794 .long 0 ; 5204 dgFlags
3795 .long 0 ; 5208 dgMisc0
3796 .long 0 ; 520C dgMisc1
3797 .long 0 ; 5210 dgMisc2
3798 .long 0 ; 5214 dgMisc3
3799 .long 0 ; 5218 dgMisc4
3800 .long 0 ; 521C dgMisc5
3801
3802 .globl EXT(LcksOpts)
; Lock option word at fixed offset 5220; zero here, presumably set from
; boot-args by lock initialization code — TODO confirm.
3803 EXT(LcksOpts):
3804 .long 0 ; 5220 lcksWork
3805 .long 0 ; 5224 reserved
3806 .long 0 ; 5228 reserved
3807 .long 0 ; 522C reserved
3808 .long 0 ; 5230 reserved
3809 .long 0 ; 5234 reserved
3810 .long 0 ; 5238 reserved
3811 .long 0 ; 523C reserved
3812 .long 0 ; 5240 reserved
3813 .long 0 ; 5244 reserved
3814 .long 0 ; 5248 reserved
3815 .long 0 ; 524C reserved
3816 .long 0 ; 5250 reserved
3817 .long 0 ; 5254 reserved
3818 .long 0 ; 5258 reserved
3819 .long 0 ; 525C reserved
3820 .long 0 ; 5260 reserved
3821 .long 0 ; 5264 reserved
3822 .long 0 ; 5268 reserved
3823 .long 0 ; 526C reserved
3824 .long 0 ; 5270 reserved
3825 .long 0 ; 5274 reserved
3826 .long 0 ; 5278 reserved
3827 .long 0 ; 527C reserved
3828
3829 .globl EXT(pPcfg)
; Page configuration table at fixed offset 5280.  The first entry is
; the default (4K) configuration: high bit set (looks like a valid
; flag) with two 12s encoded in the low bytes (2^12 = 4096 — field
; meanings per the pmap page-config code, TODO confirm).  Remaining
; entries are populated at runtime.
3830 EXT(pPcfg):
3831 .long 0x80000000 | (12 << 8) | 12 ; 5280 pcfDefPcfg - 4k
3832 .long 0 ; 5284 pcfLargePcfg
3833 .long 0 ; 5288 Non-primary page configurations
3834 .long 0 ; 528C Non-primary page configurations
3835 .long 0 ; 5290 Non-primary page configurations
3836 .long 0 ; 5294 Non-primary page configurations
3837 .long 0 ; 5298 Non-primary page configurations
3838 .long 0 ; 529C Non-primary page configurations
3839
3840 .long 0 ; 52A0 reserved
3841 .long 0 ; 52A4 reserved
3842 .long 0 ; 52A8 reserved
3843 .long 0 ; 52AC reserved
3844 .long 0 ; 52B0 reserved
3845 .long 0 ; 52B4 reserved
3846 .long 0 ; 52B8 reserved
3847 .long 0 ; 52BC reserved
3848 .long 0 ; 52C0 reserved
3849 .long 0 ; 52C4 reserved
3850 .long 0 ; 52C8 reserved
3851 .long 0 ; 52CC reserved
3852 .long 0 ; 52D0 reserved
3853 .long 0 ; 52D4 reserved
3854 .long 0 ; 52D8 reserved
3855 .long 0 ; 52DC reserved
3856 .long 0 ; 52E0 reserved
3857 .long 0 ; 52E4 reserved
3858 .long 0 ; 52E8 reserved
3859 .long 0 ; 52EC reserved
3860 .long 0 ; 52F0 reserved
3861 .long 0 ; 52F4 reserved
3862 .long 0 ; 52F8 reserved
3863 .long 0 ; 52FC reserved
3864
3865 .globl EXT(killresv)
; 128-byte scratch area (5300-537C) used as a safe target for
; store-conditional operations that exist only to clear a processor's
; outstanding reservation.  Sized to cover a full reservation granule
; (cache line) on all supported processors — TODO confirm.
3866 EXT(killresv):
3867 
3868 .long 0 ; 5300 Used to kill reservations
3869 .long 0 ; 5304 Used to kill reservations
3870 .long 0 ; 5308 Used to kill reservations
3871 .long 0 ; 530C Used to kill reservations
3872 .long 0 ; 5310 Used to kill reservations
3873 .long 0 ; 5314 Used to kill reservations
3874 .long 0 ; 5318 Used to kill reservations
3875 .long 0 ; 531C Used to kill reservations
3876 .long 0 ; 5320 Used to kill reservations
3877 .long 0 ; 5324 Used to kill reservations
3878 .long 0 ; 5328 Used to kill reservations
3879 .long 0 ; 532C Used to kill reservations
3880 .long 0 ; 5330 Used to kill reservations
3881 .long 0 ; 5334 Used to kill reservations
3882 .long 0 ; 5338 Used to kill reservations
3883 .long 0 ; 533C Used to kill reservations
3884 .long 0 ; 5340 Used to kill reservations
3885 .long 0 ; 5344 Used to kill reservations
3886 .long 0 ; 5348 Used to kill reservations
3887 .long 0 ; 534C Used to kill reservations
3888 .long 0 ; 5350 Used to kill reservations
3889 .long 0 ; 5354 Used to kill reservations
3890 .long 0 ; 5358 Used to kill reservations
3891 .long 0 ; 535C Used to kill reservations
3892 .long 0 ; 5360 Used to kill reservations
3893 .long 0 ; 5364 Used to kill reservations
3894 .long 0 ; 5368 Used to kill reservations
3895 .long 0 ; 536C Used to kill reservations
3896 .long 0 ; 5370 Used to kill reservations
3897 .long 0 ; 5374 Used to kill reservations
3898 .long 0 ; 5378 Used to kill reservations
3899 .long 0 ; 537C Used to kill reservations
3900
3901 .long 0 ; 5380 reserved
3902 .long 0 ; 5384 reserved
3903 .long 0 ; 5388 reserved
3904 .long 0 ; 538C reserved
3905 .long 0 ; 5390 reserved
3906 .long 0 ; 5394 reserved
3907 .long 0 ; 5398 reserved
3908 .long 0 ; 539C reserved
3909 .long 0 ; 53A0 reserved
3910 .long 0 ; 53A4 reserved
3911 .long 0 ; 53A8 reserved
3912 .long 0 ; 53AC reserved
3913 .long 0 ; 53B0 reserved
3914 .long 0 ; 53B4 reserved
3915 .long 0 ; 53B8 reserved
3916 .long 0 ; 53BC reserved
3917 .long 0 ; 53C0 reserved
3918 .long 0 ; 53C4 reserved
3919 .long 0 ; 53C8 reserved
3920 .long 0 ; 53CC reserved
3921 .long 0 ; 53D0 reserved
3922 .long 0 ; 53D4 reserved
3923 .long 0 ; 53D8 reserved
3924 .long 0 ; 53DC reserved
3925 .long 0 ; 53E0 reserved
3926 .long 0 ; 53E4 reserved
3927 .long 0 ; 53E8 reserved
3928 .long 0 ; 53EC reserved
3929 .long 0 ; 53F0 reserved
3930 .long 0 ; 53F4 reserved
3931 .long 0 ; 53F8 reserved
3932 .long 0 ; 53FC reserved
3933 .long 0 ; 5400 reserved
3934 .long 0 ; 5404 reserved
3935 .long 0 ; 5408 reserved
3936 .long 0 ; 540C reserved
3937 .long 0 ; 5410 reserved
3938 .long 0 ; 5414 reserved
3939 .long 0 ; 5418 reserved
3940 .long 0 ; 541C reserved
3941 .long 0 ; 5420 reserved
3942 .long 0 ; 5424 reserved
3943 .long 0 ; 5428 reserved
3944 .long 0 ; 542C reserved
3945 .long 0 ; 5430 reserved
3946 .long 0 ; 5434 reserved
3947 .long 0 ; 5438 reserved
3948 .long 0 ; 543C reserved
3949 .long 0 ; 5440 reserved
3950 .long 0 ; 5444 reserved
3951 .long 0 ; 5448 reserved
3952 .long 0 ; 544C reserved
3953 .long 0 ; 5450 reserved
3954 .long 0 ; 5454 reserved
3955 .long 0 ; 5458 reserved
3956 .long 0 ; 545C reserved
3957 .long 0 ; 5460 reserved
3958 .long 0 ; 5464 reserved
3959 .long 0 ; 5468 reserved
3960 .long 0 ; 546C reserved
3961 .long 0 ; 5470 reserved
3962 .long 0 ; 5474 reserved
3963 .long 0 ; 5478 reserved
3964 .long 0 ; 547C reserved
; Pointers to debugging aids at fixed offsets 5480-5498 so external
; tools (e.g. kernel debuggers) can locate them at known low-memory
; addresses; 548C/5490 are left free for developer use.
3965 .long EXT(kmod) ; 5480 Pointer to kmod, debugging aid
3966 .long EXT(kdp_trans_off) ; 5484 Pointer to kdp_trans_off, debugging aid
3967 .long EXT(kdp_read_io) ; 5488 Pointer to kdp_read_io, debugging aid
3968 .long 0 ; 548C Reserved for developer use
3969 .long 0 ; 5490 Reserved for developer use
3970 .long EXT(osversion) ; 5494 Pointer to osversion string, debugging aid
3971 .long EXT(flag_kdp_trigger_reboot) ; 5498 Pointer to KDP reboot trigger, debugging aid
3972
3973 ;
3974 ; The "shared page" is used for low-level debugging and is actually 1/2 page long
3975 ;
3976 
3977 . = 0x6000
3978 .globl EXT(sharedPage)
3979 
; The leading magic words and version number let tools validate the
; page before trusting its contents; the rest (504 words) starts out
; zero-filled.
3980 EXT(sharedPage): ; This is a debugging page shared by all processors
3981 .long 0xC24BC195 ; Comm Area validity value
3982 .long 0x87859393 ; Comm Area validity value
3983 .long 0xE681A2C8 ; Comm Area validity value
3984 .long 0x8599855A ; Comm Area validity value
3985 .long 0xD74BD296 ; Comm Area validity value
3986 .long 0x8388E681 ; Comm Area validity value
3987 .long 0xA2C88599 ; Comm Area validity value
3988 .short 0x855A ; Comm Area validity value
3989 .short 1 ; Comm Area version number
3990 .fill 504*4,1,0 ; (filled with 0s)
3991
3992 ;
3993 ; The ijcode area is used for code injection. It is 1/2 page long and will allow 32 processors to inject
3994 ; 16 instructions each concurrently.
3995 ;
3996 
3997 .globl EXT(ijcode)
3998 
; 2048 bytes at offset 6800: 32 slots of 64 bytes (16 instructions)
; each, indexed by cpu number.  The exception-return path copies the
; injected instruction stream here before jumping to it.
3999 EXT(ijcode): ; Code injection area
4000 .fill 512*4,1,0 ; 6800 32x64 slots for code injection streams
4001
4002 .data
4003 .align ALIGN
4004 .globl EXT(exception_end)
; Size in bytes of the exception vector region
; (ExceptionVectorsEnd - ExceptionVectorsStart), published as a data
; symbol — presumably consumed by code that copies the vectors to
; physical low memory, TODO confirm caller.
4005 EXT(exception_end):
4006 .long EXT(ExceptionVectorsEnd) -EXT(ExceptionVectorsStart) /* phys fn */
4007
4008
4009