/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License"). You may not use this file except in compliance with the
+ * License. Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
*
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
#include <assym.s>
#include <debug.h>
-#include <cpus.h>
#include <db_machine_commands.h>
#include <mach_debug.h>
VECTOR_SEGMENT
+ .globl EXT(lowGlo)
+EXT(lowGlo):
- .globl EXT(ExceptionVectorsStart)
+ .globl EXT(ExceptionVectorsStart)
EXT(ExceptionVectorsStart): /* Used if relocating the exception vectors */
baseR: /* Used so we have more readable code */
rxIg64: mtcr r11 ; Restore the CR
mfsprg r11,0 ; Get per_proc
mtspr hsprg0,r14 ; Save a register
- lwz r14,UAW(r11) ; Get the User Assist Word
+ ld r14,UAW(r11) ; Get the User Assist DoubleWord
mfsprg r13,2 ; Restore R13
lwz r11,pfAvailable(r11) ; Get the features
mtsprg 2,r11 ; Restore sprg2
li r11,T_RESERVED /* Set 'rupt code */
b .L_exception_entry /* Join common... */
+
+; System Calls (sc instruction)
;
-; System call - generated by the sc instruction
-;
-; We handle the ultra-fast traps right here. They are:
-;
-; 0xFFFFFFFF - BlueBox only - MKIsPreemptiveTask
-; 0xFFFFFFFE - BlueBox only - kcNKIsPreemptiveTaskEnv
-; 0x00007FF2 - User state only - thread info
-; 0x00007FF3 - User state only - floating point / vector facility status
-; 0x00007FF4 - Kernel only - loadMSR - not used on 64-bit machines
-;
-; Note: none handled if virtual machine is running
-; Also, it we treat SCs as kernel SCs if the RI bit is set
+; The syscall number is in r0. All we do here is munge the number into an
+; 8-bit index into the "scTable", and dispatch on it to handle the Ultra
+; Fast Traps (UFTs.) The index is:
;
+; 0x80 - set if syscall number is 0x80000000 (CutTrace)
+; 0x40 - set if syscall number is 0x00006004
+; 0x20 - set if upper 29 bits of syscall number are 0xFFFFFFF8
+; 0x10 - set if upper 29 bits of syscall number are 0x00007FF0
+; 0x0E - low three bits of syscall number
+; 0x01 - zero, as scTable is an array of shorts
. = 0xC00
.L_handlerC00:
mtsprg 3,r11 ; Save R11
- mfsprg r11,2 ; Get the feature flags
-
mtsprg 2,r13 ; Save R13
- rlwinm r11,r11,pf64Bitb-4,4,4 ; Get the 64-bit flag
- mfsrr1 r13 ; Get SRR1 for loadMSR
- rlwimi r11,r13,MSR_PR_BIT-5,5,5 ; Move the PR bit to bit 1
- mfcr r13 ; Save the CR
-
- mtcrf 0x40,r11 ; Get the top 3 CR bits to 64-bit, PR, sign
-
- cmpwi r0,lo16(-3) ; Eliminate all negatives but -1 and -2
- mfsprg r11,0 ; Get the per_proc
- bf-- 5,uftInKern ; We came from the kernel...
- ble-- notufp ; This is a mach call
-
- lwz r11,spcFlags(r11) ; Pick up the special flags
-
- cmpwi cr7,r0,lo16(-1) ; Is this a BlueBox call?
- cmplwi cr2,r0,0x7FF2 ; Ultra fast path cthread info call?
- cmplwi cr3,r0,0x7FF3 ; Ultra fast path facility status?
- cror cr4_eq,cr2_eq,cr3_eq ; Is this one of the two ufts we handle here?
-
- ble-- cr7,uftBBCall ; We think this is blue box call...
-
- rlwinm r11,r11,16,16,31 ; Extract spcFlags upper bits
- andi. r11,r11,hi16(runningVM|FamVMena|FamVMmode)
- cmpwi cr0,r11,hi16(runningVM|FamVMena|FamVMmode) ; Test in VM FAM
- beq-- cr0,ufpVM ; fast paths running VM ...
-
- bne-- cr4_eq,notufp ; Bail ifthis is not a uft...
-
-;
-; Handle normal user ultra-fast trap
-;
-
- li r3,spcFlags ; Assume facility status - 0x7FF3
-
- beq-- cr3,uftFacStat ; This is a facilities status call...
-
- li r3,UAW ; This is really a thread info call - 0x7FF2
-
-uftFacStat: mfsprg r11,0 ; Get the per_proc
- lwzx r3,r11,r3 ; Get the UAW or spcFlags field
-
-uftExit: bt++ 4,uftX64 ; Go do the 64-bit exit...
-
- lwz r11,pfAvailable(r11) ; Get the feature flags
- mtcrf 255,r13 ; Restore the CRs
- mfsprg r13,2 ; Restore R13
- mtsprg 2,r11 ; Set the feature flags
- mfsprg r11,3 ; Restore R11
-
- rfi ; Back to our guy...
-
-uftX64: mtspr hsprg0,r14 ; Save a register
-
- lwz r14,UAW(r11) ; Get the User Assist Word
- lwz r11,pfAvailable(r11) ; Get the feature flags
-
- mtcrf 255,r13 ; Restore the CRs
-
- mfsprg r13,2 ; Restore R13
- mtsprg 2,r11 ; Set the feature flags
- mfsprg r11,3 ; Restore R11
- mtsprg 3,r14 ; Set the UAW in sprg3
- mfspr r14,hsprg0 ; Restore R14
-
- rfid ; Back to our guy...
-
-;
-; Handle BlueBox ultra-fast trap
-;
-
-uftBBCall: andi. r11,r11,bbNoMachSC|bbPreemptive ; Clear what we do not need
- cmplwi r11,bbNoMachSC ; See if we are trapping syscalls
- blt-- notufp ; No...
-
- rlwimi r13,r11,bbPreemptivebit-cr0_eq,cr0_eq,cr0_eq ; Copy preemptive task flag into user cr0_eq
-
- mfsprg r11,0 ; Get the per proc
-
- beq++ cr7,uftExit ; For MKIsPreemptiveTask we are done...
-
- lwz r0,ppbbTaskEnv(r11) ; Get the shadowed taskEnv from per_proc_area
- b uftExit ; We are really all done now...
-
-; Kernel ultra-fast trap
-
-uftInKern: cmplwi r0,0x7FF4 ; Ultra fast path loadMSR?
- bne- notufp ; Someone is trying to cheat...
-
- mtsrr1 r3 ; Set new MSR
-
- b uftExit ; Go load the new MSR...
-
-notufp: mtcrf 0xFF,r13 ; Restore the used CRs
- li r11,T_SYSTEM_CALL|T_FAM ; Set interrupt code
- b .L_exception_entry ; Join common...
-
-
-
-
+ rlwinm r11,r0,0,0xFFFFFFF8 ; mask off low 3 bits of syscall number
+ xori r13,r11,0x7FF0 ; start to check for the 0x7FFx traps
+ addi r11,r11,8 ; make a 0 iff this is a 0xFFFFFFF8 trap
+ cntlzw r13,r13 ; set bit 0x20 iff a 0x7FFx trap
+ cntlzw r11,r11 ; set bit 0x20 iff a 0xFFFFFFF8 trap
+ xoris r0,r0,0x8000 ; Flip bit to make 0 iff 0x80000000
+ rlwimi r11,r13,31,0x10 ; move 0x7FFx bit into position
+ cntlzw r13,r0 ; Set bit 0x20 iff 0x80000000
+ xoris r0,r0,0x8000 ; Flip bit to restore R0
+ rlwimi r11,r13,2,0x80 ; Set bit 0x80 iff CutTrace
+ xori r13,r0,0x6004 ; start to check for 0x6004
+ rlwimi r11,r0,1,0xE ; move in low 3 bits of syscall number
+ cntlzw r13,r13 ; set bit 0x20 iff 0x6004
+ rlwinm r11,r11,0,0,30 ; clear out bit 31
+ rlwimi r11,r13,1,0x40 ; move 0x6004 bit into position
+ lhz r11,lo16(scTable)(r11) ; get branch address from sc table
+ mfctr r13 ; save caller's ctr in r13
+ mtctr r11 ; set up branch to syscall handler
+ mfsprg r11,0 ; get per_proc, which most UFTs use
+	bctr					; dispatch (caller's r11 in sprg3, caller's r13 in sprg2, caller's ctr in r13, per_proc in r11)
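+
+; For reference, the branchless sequence above computes the same index as this
+; hedged C sketch ("num" and "idx" are illustrative names, not from this file):
+;
+;	unsigned idx = (num << 1) & 0x0E;		// low three bits of syscall number, doubled
+;	if (num == 0x80000000) idx |= 0x80;		// CutTrace
+;	if (num == 0x00006004) idx |= 0x40;		// vmm_dispatch
+;	if ((num & ~7u) == 0xFFFFFFF8) idx |= 0x20;	// 0xFFFFFFF8-0xFFFFFFFF
+;	if ((num & ~7u) == 0x00007FF0) idx |= 0x10;	// 0x00007FF0-0x00007FF7
+;	handler = scTable[idx >> 1];			// 16-bit offset from baseR
+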
/*
* Trace - generated by single stepping
lwz r2,tempr1+4(r11) ; Restore work register
lwz r3,tempr2+4(r11) ; Restore work register
beq cr2,notspectr ; Buffer filled, make a rupt...
- b uftExit ; Go restore and leave...
+ mtcr r13 ; Restore CR
+ b uftRFI ; Go restore and leave...
sbxx64c: ld r1,tempr0(r11) ; Restore work register
ld r2,tempr1(r11) ; Restore work register
ld r3,tempr2(r11) ; Restore work register
beq cr2,notspectr ; Buffer filled, make a rupt...
- b uftExit ; Go restore and leave...
+ mtcr r13 ; Restore CR
+ b uftRFI ; Go restore and leave...
/*
* Floating point assist
li r11,T_INSTRUMENTATION /* Set 'rupt code */
b .L_exception_entry /* Join common... */
- . = 0x2100
-/*
- * Filter Ultra Fast Path syscalls for VMM
+
+ .data
+ .align ALIGN
+ .globl EXT(exception_entry)
+EXT(exception_entry):
+ .long .L_exception_entry-EXT(ExceptionVectorsStart) /* phys addr of fn */
+
+ VECTOR_SEGMENT
+
+/*<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
+ *
+ * First-level syscall dispatch. The syscall vector maps r0 (the syscall number) into an
+ * index into the "scTable" (below), and then branches to one of these routines. The PPC
+ * syscalls come in several varieties, as follows:
+ *
+ * 1. If (syscall & 0xFFFFF000) == 0x00007000, then it is a PPC Fast Trap or UFT.
+ * The UFTs are dispatched here, the Fast Traps are dispatched in hw_exceptions.s.
+ *
+ * 2. If (syscall & 0xFFFFF000) == 0x00006000, then it is a PPC-only trap.
+ * One of these (0x6004) is a UFT, but most are dispatched in hw_exceptions.s. These
+ * are mostly Blue Box or VMM (Virtual Machine) calls.
+ *
+ * 3. If (syscall & 0xFFFFFFF0) == 0xFFFFFFF0, then it is also a UFT and is dispatched here.
+ *
+ * 4. If (syscall & 0xFFFFF000) == 0x80000000, then it is a "firmware" call and is dispatched in
+ * Firmware.s, though the special "Cut Trace" trap (0x80000000) is handled here as an ultra
+ * fast trap.
+ *
+ * 5. If (syscall & 0xFFFFF000) == 0xFFFFF000, and it is not one of the above, then it is a Mach
+ * syscall, which are dispatched in hw_exceptions.s via "mach_trap_table".
+ *
+ * 6. If (syscall & 0xFFFFF000) == 0x00000000, then it is a BSD syscall, which are dispatched
+ * by "unix_syscall" using the "sysent" table.
+ *
+ * What distinguishes the UFTs, aside from being ultra fast, is that they cannot rely on translation
+ * being on, and so cannot look at the activation or task control block, etc. We handle them right
+ * here, and return to the caller without turning interrupts or translation on. The UFTs are:
+ *
+ * 0xFFFFFFFF - BlueBox only - MKIsPreemptiveTask
+ * 0xFFFFFFFE - BlueBox only - MKIsPreemptiveTaskEnv
+ * 0x00007FF2 - User state only - thread info (32-bit mode)
+ * 0x00007FF3 - User state only - floating point / vector facility status
+ * 0x00007FF4 - Kernel only - loadMSR - not used on 64-bit machines
+ * 0x00006004 - vmm_dispatch (only some of which are UFTs)
+ *
+ * "scTable" is an array of 2-byte addresses, accessed using a 7-bit index derived from the syscall
+ * number as follows:
+ *
+ * 0x80 (A) - set if syscall number is 0x80000000
+ * 0x40 (B) - set if syscall number is 0x00006004
+ * 0x20 (C) - set if upper 29 bits of syscall number are 0xFFFFFFF8
+ * 0x10 (D) - set if upper 29 bits of syscall number are 0x00007FF0
+ * 0x0E (E) - low three bits of syscall number
+ *
+ * If you define another UFT, try to use a number in one of the currently decoded ranges, ie one marked
+ * "unassigned" below. The dispatch table and the UFT handlers must reside in the first 32KB of
+ * physical memory.
*/
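+
+/* In C terms, the classification above is roughly this hedged sketch (not code
+ * from this file; "num" stands for the 32-bit syscall number in r0, and the order
+ * of the tests matters because the ranges overlap):
+ *
+ *	if      ((num & 0xFFFFF000) == 0x00007000) ...	// 1. PPC Fast Trap or UFT
+ *	else if ((num & 0xFFFFF000) == 0x00006000) ...	// 2. PPC-only trap (Blue Box / VMM)
+ *	else if ((num & 0xFFFFFFF0) == 0xFFFFFFF0) ...	// 3. UFT
+ *	else if ((num & 0xFFFFF000) == 0x80000000) ...	// 4. firmware call (CutTrace iff exactly 0x80000000)
+ *	else if ((num & 0xFFFFF000) == 0xFFFFF000) ...	// 5. Mach syscall
+ *	else                                       ...	// 6. BSD syscall
+ */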
-ufpVM:
- cmpwi cr2,r0,0x6004 ; Is it vmm_dispatch
- bne cr2,notufp ; Exit If not
+
+	.align 8				; start this table on a 256-byte boundary
+scTable: ; ABCD E
+ .short uftNormalSyscall-baseR ; 0000 0 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0000 1 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0000 2 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0000 3 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0000 4 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0000 5 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0000 6 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0000 7 these syscalls are not in a reserved range
+
+ .short uftNormalSyscall-baseR ; 0001 0 0x7FF0 is unassigned
+ .short uftNormalSyscall-baseR ; 0001 1 0x7FF1 is Set Thread Info Fast Trap (pass up)
+ .short uftThreadInfo-baseR ; 0001 2 0x7FF2 is Thread Info
+ .short uftFacilityStatus-baseR ; 0001 3 0x7FF3 is Facility Status
+ .short uftLoadMSR-baseR ; 0001 4 0x7FF4 is Load MSR
+ .short uftNormalSyscall-baseR ; 0001 5 0x7FF5 is the Null FastPath Trap (pass up)
+ .short uftNormalSyscall-baseR ; 0001 6 0x7FF6 is unassigned
+ .short uftNormalSyscall-baseR ; 0001 7 0x7FF7 is unassigned
+
+	.short uftNormalSyscall-baseR		; 0010 0 0xFFFFFFF8 is unassigned
+	.short uftNormalSyscall-baseR		; 0010 1 0xFFFFFFF9 is unassigned
+	.short uftNormalSyscall-baseR		; 0010 2 0xFFFFFFFA is unassigned
+	.short uftNormalSyscall-baseR		; 0010 3 0xFFFFFFFB is unassigned
+	.short uftNormalSyscall-baseR		; 0010 4 0xFFFFFFFC is unassigned
+	.short uftNormalSyscall-baseR		; 0010 5 0xFFFFFFFD is unassigned
+ .short uftIsPreemptiveTaskEnv-baseR ; 0010 6 0xFFFFFFFE is Blue Box uftIsPreemptiveTaskEnv
+ .short uftIsPreemptiveTask-baseR ; 0010 7 0xFFFFFFFF is Blue Box IsPreemptiveTask
+
+ .short WhoaBaby-baseR ; 0011 0 impossible combination
+ .short WhoaBaby-baseR ; 0011 1 impossible combination
+ .short WhoaBaby-baseR ; 0011 2 impossible combination
+ .short WhoaBaby-baseR ; 0011 3 impossible combination
+ .short WhoaBaby-baseR ; 0011 4 impossible combination
+ .short WhoaBaby-baseR ; 0011 5 impossible combination
+ .short WhoaBaby-baseR ; 0011 6 impossible combination
+ .short WhoaBaby-baseR ; 0011 7 impossible combination
+
+ .short WhoaBaby-baseR ; 0100 0 0x6000 is an impossible index (diagCall)
+ .short WhoaBaby-baseR ; 0100 1 0x6001 is an impossible index (vmm_get_version)
+ .short WhoaBaby-baseR ; 0100 2 0x6002 is an impossible index (vmm_get_features)
+ .short WhoaBaby-baseR ; 0100 3 0x6003 is an impossible index (vmm_init_context)
+ .short uftVMM-baseR ; 0100 4 0x6004 is vmm_dispatch (only some of which are UFTs)
+ .short WhoaBaby-baseR ; 0100 5 0x6005 is an impossible index (bb_enable_bluebox)
+ .short WhoaBaby-baseR ; 0100 6 0x6006 is an impossible index (bb_disable_bluebox)
+ .short WhoaBaby-baseR ; 0100 7 0x6007 is an impossible index (bb_settaskenv)
+
+ .short uftNormalSyscall-baseR ; 0101 0 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0101 1 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0101 2 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0101 3 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0101 4 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0101 5 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0101 6 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0101 7 these syscalls are not in a reserved range
+
+ .short uftNormalSyscall-baseR ; 0110 0 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0110 1 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0110 2 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0110 3 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0110 4 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0110 5 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0110 6 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0110 7 these syscalls are not in a reserved range
+
+ .short uftNormalSyscall-baseR ; 0111 0 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0111 1 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0111 2 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0111 3 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0111 4 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0111 5 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0111 6 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0111 7 these syscalls are not in a reserved range
+
+ .short uftCutTrace-baseR ; 1000 0 CutTrace
+ .short uftNormalSyscall-baseR ; 1000 1 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1000 2 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1000 3 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1000 4 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1000 5 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1000 6 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1000 7 these syscalls are not in a reserved range
+
+ .short uftNormalSyscall-baseR ; 1001 0 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1001 1 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1001 2 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1001 3 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1001 4 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1001 5 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1001 6 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1001 7 these syscalls are not in a reserved range
+
+ .short uftNormalSyscall-baseR ; 1010 0 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1010 1 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1010 2 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1010 3 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1010 4 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1010 5 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1010 6 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1010 7 these syscalls are not in a reserved range
+
+ .short uftNormalSyscall-baseR ; 1011 0 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1011 1 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1011 2 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1011 3 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1011 4 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1011 5 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1011 6 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1011 7 these syscalls are not in a reserved range
+
+ .short uftNormalSyscall-baseR ; 1100 0 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1100 1 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1100 2 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1100 3 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1100 4 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1100 5 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1100 6 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1100 7 these syscalls are not in a reserved range
+
+ .short uftNormalSyscall-baseR ; 1101 0 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1101 1 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1101 2 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1101 3 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1101 4 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1101 5 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1101 6 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1101 7 these syscalls are not in a reserved range
+
+ .short uftNormalSyscall-baseR ; 1110 0 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1110 1 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1110 2 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1110 3 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1110 4 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1110 5 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1110 6 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1110 7 these syscalls are not in a reserved range
+
+ .short uftNormalSyscall-baseR ; 1111 0 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1111 1 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1111 2 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1111 3 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1111 4 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1111 5 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1111 6 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1111 7 these syscalls are not in a reserved range
+
+ .align 2 ; prepare for code
+
+
+/* Ultra Fast Trap (UFT) Handlers:
+ *
+ * We get here directly from the hw syscall vector via the "scTable" vector (above),
+ * with interrupts and VM off, in 64-bit mode if supported, and with all registers live
+ * except the following:
+ *
+ * r11 = per_proc ptr (ie, sprg0)
+ * r13 = holds caller's ctr register
+ * sprg2 = holds caller's r13
+ * sprg3 = holds caller's r11
+ */
+
+; Handle "vmm_dispatch" (0x6004), of which only some selectors are UFTs.
+
+uftVMM:
+ mtctr r13 ; restore caller's ctr
+ lwz r11,spcFlags(r11) ; get the special flags word from per_proc
+ mfcr r13 ; save caller's entire cr (we use all fields below)
+ rlwinm r11,r11,16,16,31 ; Extract spcFlags upper bits
+ andi. r11,r11,hi16(runningVM|FamVMena|FamVMmode)
+ cmpwi cr0,r11,hi16(runningVM|FamVMena|FamVMmode) ; Test in VM FAM
+ bne-- uftNormal80 ; not eligible for FAM UFTs
cmpwi cr5,r3,kvmmResumeGuest ; Compare r3 with kvmmResumeGuest
cmpwi cr2,r3,kvmmSetGuestRegister ; Compare r3 with kvmmSetGuestRegister
cror cr1_eq,cr5_lt,cr2_gt ; Set true if out of VMM Fast syscall range
- bt- cr1_eq,notufp ; Exit if out of range
- b EXT(vmm_ufp) ; Ultra Fast Path syscall
+ bt-- cr1_eq,uftNormalFF ; Exit if out of range (the others are not UFTs)
+ b EXT(vmm_ufp) ; handle UFT range of vmm_dispatch syscall
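+
+; In C terms, the two compares and the cror above implement a single-branch range
+; check (a sketch only): if (r3 < kvmmResumeGuest || r3 > kvmmSetGuestRegister),
+; the selector is outside the UFT subset and the call is passed up the normal path.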
+
+
+; Handle blue box UFTs (syscalls -1 and -2).
+
+uftIsPreemptiveTask:
+uftIsPreemptiveTaskEnv:
+ mtctr r13 ; restore caller's ctr
+ lwz r11,spcFlags(r11) ; get the special flags word from per_proc
+ mfcr r13,0x80 ; save caller's cr0 so we can use it
+ andi. r11,r11,bbNoMachSC|bbPreemptive ; Clear what we do not need
+ cmplwi r11,bbNoMachSC ; See if we are trapping syscalls
+ blt-- uftNormal80 ; No...
+ cmpwi r0,-2 ; is this call IsPreemptiveTaskEnv?
+ rlwimi r13,r11,bbPreemptivebit-cr0_eq,cr0_eq,cr0_eq ; Copy preemptive task flag into user cr0_eq
+ mfsprg r11,0 ; Get the per proc once more
+ bne++ uftRestoreThenRFI ; do not load r0 if IsPreemptiveTask
+ lwz r0,ppbbTaskEnv(r11) ; Get the shadowed taskEnv (only difference)
+ b uftRestoreThenRFI ; restore modified cr0 and return
+
+
+; Handle "Thread Info" UFT (0x7FF2)
+ .globl EXT(uft_uaw_nop_if_32bit)
+uftThreadInfo:
+ lwz r3,UAW+4(r11) ; get user assist word, assuming a 32-bit processor
+LEXT(uft_uaw_nop_if_32bit)
+ ld r3,UAW(r11) ; get the whole doubleword if 64-bit (patched to nop if 32-bit)
+ mtctr r13 ; restore caller's ctr
+ b uftRFI ; done
+
+
+; Handle "Facility Status" UFT (0x7FF3)
+
+uftFacilityStatus:
+ lwz r3,spcFlags(r11) ; get "special flags" word from per_proc
+ mtctr r13 ; restore caller's ctr
+ b uftRFI ; done
+
+
+; Handle "Load MSR" UFT (0x7FF4). This is not used on 64-bit processors, though it would work.
+
+uftLoadMSR:
+ mfsrr1 r11 ; get caller's MSR
+ mtctr r13 ; restore caller's ctr
+ mfcr r13,0x80 ; save caller's cr0 so we can test PR
+ rlwinm. r11,r11,0,MSR_PR_BIT,MSR_PR_BIT ; really in the kernel?
+ bne- uftNormal80 ; do not permit from user mode
+ mfsprg r11,0 ; restore per_proc
+ mtsrr1 r3 ; Set new MSR
+
+
+; Return to caller after UFT. When called:
+; r11 = per_proc ptr
+; r13 = caller's cr0 in upper nibble (if uftRestoreThenRFI called)
+; sprg2 = caller's r13
+; sprg3 = caller's r11
+
+uftRestoreThenRFI: ; WARNING: can drop down to here
+ mtcrf 0x80,r13 ; restore caller's cr0
+uftRFI:
+ .globl EXT(uft_nop_if_32bit)
+LEXT(uft_nop_if_32bit)
+ b uftX64 ; patched to NOP if 32-bit processor
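+; (uft_nop_if_32bit is exported so that processor setup code can presumably patch
+; the branch above to a nop on 32-bit hardware, letting execution fall into uftX32.)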
+
+uftX32: lwz r11,pfAvailable(r11) ; Get the feature flags
+ mfsprg r13,2 ; Restore R13
+ mtsprg 2,r11 ; Set the feature flags
+ mfsprg r11,3 ; Restore R11
+ rfi ; Back to our guy...
+
+uftX64: mtspr hsprg0,r14 ; Save a register in a Hypervisor SPRG
+ ld r14,UAW(r11) ; Get the User Assist DoubleWord
+ lwz r11,pfAvailable(r11) ; Get the feature flags
+ mfsprg r13,2 ; Restore R13
+ mtsprg 2,r11 ; Set the feature flags
+ mfsprg r11,3 ; Restore R11
+ mtsprg 3,r14 ; Set the UAW in sprg3
+ mfspr r14,hsprg0 ; Restore R14
+ rfid ; Back to our guy...
+
+;
+; Quickly cut a trace table entry for the CutTrace firmware call.
+;
+; All registers except R11 and R13 are unchanged.
+;
+; Note that this code cuts a trace table entry for the CutTrace call only.
+; An identical entry is made during normal interrupt processing. Any entry
+; format changes made must be done in both places.
+;
+
+ .align 5
+
+ .globl EXT(uft_cuttrace)
+LEXT(uft_cuttrace)
+uftCutTrace:
+ b uftct64 ; patched to NOP if 32-bit processor
+
+ stw r20,tempr0(r11) ; Save some work registers
+ lwz r20,dgFlags(0) ; Get the flags
+ stw r21,tempr1(r11) ; Save some work registers
+ mfsrr1 r21 ; Get the SRR1
+ rlwinm r20,r20,MSR_PR_BIT-enaUsrFCallb,MASK(MSR_PR) ; Shift the validity bit over to pr bit spot
+ stw r25,tempr2(r11) ; Save some work registers
+ orc r20,r20,r21 ; Get ~PR | FC
+ mfcr r25 ; Save the CR
+ stw r22,tempr3(r11) ; Save some work registers
+ lhz r22,PP_CPU_NUMBER(r11) ; Get the logical processor number
+	andi.	r20,r20,MASK(MSR_PR)		; Set cr0_eq if we are in problem state and the validity bit is not set
+ stw r23,tempr4(r11) ; Save some work registers
+ lwz r23,traceMask(0) ; Get the trace mask
+ stw r24,tempr5(r11) ; Save some work registers
+ beq- ctbail32 ; Can not issue from user...
+
+
+ addi r24,r22,16 ; Get shift to move cpu mask to syscall mask
+ rlwnm r24,r23,r24,12,12 ; Shift cpu mask bit to rupt type mask
+ and. r24,r24,r23 ; See if both are on
+
+;
+; We select a trace entry using a compare and swap on the next entry field.
+; Since we do not lock the actual trace buffer, there is a potential that
+; another processor could wrap and trash our entry. Who cares?
+;
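+;
+; In C terms, the claim below is roughly this hedged sketch ("next" is the
+; trcWork next-entry pointer; "start" and "end" come from traceStart/traceEnd):
+;
+;	do {
+;		old = *next;				// lwarx: load and reserve
+;		new = old + LTR_size;			// step to the next entry
+;		if (new == end) new = start;		// wrap the trace table
+;	} while (!store_conditional(next, new));	// stwcx.: fails if the reservation was lost
+;	entry = old;					// ours, unless another CPU laps us
+;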
+
+ li r23,trcWork ; Get the trace work area address
+ lwz r21,traceStart(0) ; Get the start of trace table
+ lwz r22,traceEnd(0) ; Get end of trace table
+
+ beq-- ctdisa32 ; Leave because tracing is disabled...
+
+ctgte32: lwarx r20,0,r23 ; Get and reserve the next slot to allocate
+ addi r24,r20,LTR_size ; Point to the next trace entry
+ cmplw r24,r22 ; Do we need to wrap the trace table?
+ bne+ ctgte32s ; No wrap, we got us a trace entry...
+
+ mr r24,r21 ; Wrap back to start
+
+ctgte32s: stwcx. r24,0,r23 ; Try to update the current pointer
+ bne- ctgte32 ; Collision, try again...
+
+#if ESPDEBUG
+ dcbf 0,r23 ; Force to memory
+ sync
+#endif
+
+ dcbz 0,r20 ; Clear and allocate first trace line
+ li r24,32 ; Offset to next line
+
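+; (The loop below reads TBU, TBL, then TBU again, retrying until both TBU reads
+; match, so the 64-bit timestamp is consistent across a low-to-high carry.)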
+ctgte32tb: mftbu r21 ; Get the upper time now
+ mftb r22 ; Get the lower time now
+ mftbu r23 ; Get upper again
+ cmplw r21,r23 ; Has it ticked?
+ bne- ctgte32tb ; Yes, start again...
+
+ dcbz r24,r20 ; Clean second line
+
+;
+; Let us cut that trace entry now.
+;
+; Note that this code cuts a trace table entry for the CutTrace call only.
+; An identical entry is made during normal interrupt processing. Any entry
+; format changes made must be done in both places.
+;
+
+ lhz r24,PP_CPU_NUMBER(r11) ; Get the logical processor number
+ li r23,T_SYSTEM_CALL ; Get the system call id
+ mtctr r13 ; Restore the caller's CTR
+ sth r24,LTR_cpu(r20) ; Save processor number
+ li r24,64 ; Offset to third line
+ sth r23,LTR_excpt(r20) ; Set the exception code
+ dcbz r24,r20 ; Clean 3rd line
+ mfspr r23,dsisr ; Get the DSISR
+ stw r21,LTR_timeHi(r20) ; Save top of time stamp
+ li r24,96 ; Offset to fourth line
+ mflr r21 ; Get the LR
+ dcbz r24,r20 ; Clean 4th line
+ stw r22,LTR_timeLo(r20) ; Save bottom of time stamp
+ mfsrr0 r22 ; Get SRR0
+ stw r25,LTR_cr(r20) ; Save CR
+ mfsrr1 r24 ; Get the SRR1
+ stw r23,LTR_dsisr(r20) ; Save DSISR
+ stw r22,LTR_srr0+4(r20) ; Save SRR0
+ mfdar r23 ; Get DAR
+ stw r24,LTR_srr1+4(r20) ; Save SRR1
+ stw r23,LTR_dar+4(r20) ; Save DAR
+ stw r21,LTR_lr+4(r20) ; Save LR
+
+ stw r13,LTR_ctr+4(r20) ; Save CTR
+ stw r0,LTR_r0+4(r20) ; Save register
+ stw r1,LTR_r1+4(r20) ; Save register
+ stw r2,LTR_r2+4(r20) ; Save register
+ stw r3,LTR_r3+4(r20) ; Save register
+ stw r4,LTR_r4+4(r20) ; Save register
+ stw r5,LTR_r5+4(r20) ; Save register
+ stw r6,LTR_r6+4(r20) ; Save register
+
+#if 0
+ lwz r21,FPUowner(r11) ; (TEST/DEBUG) Get the current floating point owner
+ stw r21,LTR_rsvd0(r20) ; (TEST/DEBUG) Record the owner
+#endif
+
+#if ESPDEBUG
+ addi r21,r20,32 ; Second line
+ addi r22,r20,64 ; Third line
+ dcbst 0,r20 ; Force to memory
+ dcbst 0,r21 ; Force to memory
+ addi r21,r22,32 ; Fourth line
+ dcbst 0,r22 ; Force to memory
+ dcbst 0,r21 ; Force to memory
+ sync ; Make sure it all goes
+#endif
+
+ctdisa32: mtcrf 0x80,r25 ; Restore the used condition register field
+ lwz r20,tempr0(r11) ; Restore work register
+ lwz r21,tempr1(r11) ; Restore work register
+ lwz r25,tempr2(r11) ; Restore work register
+ mtctr r13 ; Restore the caller's CTR
+ lwz r22,tempr3(r11) ; Restore work register
+ lwz r23,tempr4(r11) ; Restore work register
+ lwz r24,tempr5(r11) ; Restore work register
+ b uftX32 ; Go restore the rest and go...
+
+ctbail32: mtcrf 0x80,r25 ; Restore the used condition register field
+ lwz r20,tempr0(r11) ; Restore work register
+ lwz r21,tempr1(r11) ; Restore work register
+ lwz r25,tempr2(r11) ; Restore work register
+ mtctr r13 ; Restore the caller's CTR
+ lwz r22,tempr3(r11) ; Restore work register
+ lwz r23,tempr4(r11) ; Restore work register
+ b uftNormalSyscall ; Go pass it on along...
+
+;
+; This is the 64-bit version.
+;
+
+uftct64: std r20,tempr0(r11) ; Save some work registers
+ lwz r20,dgFlags(0) ; Get the flags
+ std r21,tempr1(r11) ; Save some work registers
+ mfsrr1 r21 ; Get the SRR1
+ rlwinm r20,r20,MSR_PR_BIT-enaUsrFCallb,MASK(MSR_PR) ; Shift the validity bit over to pr bit spot
+ std r25,tempr2(r11) ; Save some work registers
+ orc r20,r20,r21 ; Get ~PR | FC
+ mfcr r25 ; Save the CR
+ std r22,tempr3(r11) ; Save some work registers
+ lhz r22,PP_CPU_NUMBER(r11) ; Get the logical processor number
+ andi. r20,r20,MASK(MSR_PR) ; Set cr0_eq when we are in problem state and the validity bit is not set
+ std r23,tempr4(r11) ; Save some work registers
+ lwz r23,traceMask(0) ; Get the trace mask
+ std r24,tempr5(r11) ; Save some work registers
+ beq-- ctbail64 ; Can not issue from user...
+
+ addi r24,r22,16 ; Get shift to move cpu mask to syscall mask
+ rlwnm r24,r23,r24,12,12 ; Shift cpu mask bit to rupt type mask
+ and. r24,r24,r23 ; See if both are on
+
+;
+; We select a trace entry using a compare and swap on the next entry field.
+; Since we do not lock the actual trace buffer, there is a potential that
+; another processor could wrap and trash our entry. Who cares?
+;
+
+ li r23,trcWork ; Get the trace work area address
+ lwz r21,traceStart(0) ; Get the start of trace table
+ lwz r22,traceEnd(0) ; Get end of trace table
+
+ beq-- ctdisa64 ; Leave because tracing is disabled...
+
+ctgte64: lwarx r20,0,r23 ; Get and reserve the next slot to allocate
+ addi r24,r20,LTR_size ; Point to the next trace entry
+ cmplw r24,r22 ; Do we need to wrap the trace table?
+ bne++ ctgte64s ; No wrap, we got us a trace entry...
+
+ mr r24,r21 ; Wrap back to start
+
+ctgte64s: stwcx. r24,0,r23 ; Try to update the current pointer
+ bne-- ctgte64 ; Collision, try again...
+
+#if ESPDEBUG
+ dcbf 0,r23 ; Force to memory
+ sync
+#endif
+
+ dcbz128 0,r20 ; Zap the trace entry
+
+ mftb r21 ; Get the time
+
+;
+; Let us cut that trace entry now.
+;
+; Note that this code cuts a trace table entry for the CutTrace call only.
+; An identical entry is made during normal interrupt processing. Any entry
+; format changes made must be done in both places.
+;
+
+ lhz r24,PP_CPU_NUMBER(r11) ; Get the logical processor number
+ li r23,T_SYSTEM_CALL ; Get the system call id
+ sth r24,LTR_cpu(r20) ; Save processor number
+ sth r23,LTR_excpt(r20) ; Set the exception code
+ mfspr r23,dsisr ; Get the DSISR
+ std r21,LTR_timeHi(r20) ; Save top of time stamp
+ mflr r21 ; Get the LR
+ mfsrr0 r22 ; Get SRR0
+ stw r25,LTR_cr(r20) ; Save CR
+ mfsrr1 r24 ; Get the SRR1
+ stw r23,LTR_dsisr(r20) ; Save DSISR
+ std r22,LTR_srr0(r20) ; Save SRR0
+ mfdar r23 ; Get DAR
+ std r24,LTR_srr1(r20) ; Save SRR1
+ std r23,LTR_dar(r20) ; Save DAR
+ std r21,LTR_lr(r20) ; Save LR
+
+ std r13,LTR_ctr(r20) ; Save CTR
+ std r0,LTR_r0(r20) ; Save register
+ std r1,LTR_r1(r20) ; Save register
+ std r2,LTR_r2(r20) ; Save register
+ std r3,LTR_r3(r20) ; Save register
+ std r4,LTR_r4(r20) ; Save register
+ std r5,LTR_r5(r20) ; Save register
+ std r6,LTR_r6(r20) ; Save register
+
+#if 0
+ lwz r21,FPUowner(r11) ; (TEST/DEBUG) Get the current floating point owner
+ stw r21,LTR_rsvd0(r20) ; (TEST/DEBUG) Record the owner
+#endif
+
+#if ESPDEBUG
+ dcbf 0,r20 ; Force to memory
+ sync ; Make sure it all goes
+#endif
+
+ctdisa64: mtcrf 0x80,r25 ; Restore the used condition register field
+ ld r20,tempr0(r11) ; Restore work register
+ ld r21,tempr1(r11) ; Restore work register
+ ld r25,tempr2(r11) ; Restore work register
+ mtctr r13 ; Restore the caller's CTR
+ ld r22,tempr3(r11) ; Restore work register
+ ld r23,tempr4(r11) ; Restore work register
+ ld r24,tempr5(r11) ; Restore work register
+ b uftX64 ; Go restore the rest and go...
+
+ctbail64: mtcrf 0x80,r25 ; Restore the used condition register field
+ ld r20,tempr0(r11) ; Restore work register
+ ld r21,tempr1(r11) ; Restore work register
+ ld r25,tempr2(r11) ; Restore work register
+ mtctr r13 ; Restore the caller's CTR
+ ld r22,tempr3(r11) ; Restore work register
+ ld r23,tempr4(r11) ; Restore work register
+	li	r11,T_SYSTEM_CALL|T_FAM		; Set system call code
+ b extEntry64 ; Go straight to the 64-bit code...
+
+
+
+; Handle a system call that is not a UFT and which thus goes upstairs.
+
+uftNormalFF: ; here with entire cr in r13
+ mtcr r13 ; restore all 8 fields
+ b uftNormalSyscall1 ; Join common...
+
+uftNormal80:					; here with caller's cr0 in r13
+ mtcrf 0x80,r13 ; restore cr0
+ b uftNormalSyscall1 ; Join common...
+
+uftNormalSyscall: ; r13 = callers ctr
+ mtctr r13 ; restore ctr
+uftNormalSyscall1:
+ li r11,T_SYSTEM_CALL|T_FAM ; this is a system call (and fall through)
+
+
+/*<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>*/
/*
* .L_exception_entry(type)
*
- * This is the common exception handling routine called by any
- * type of system exception.
+ * Come here via branch directly from the vector, or falling down from above, with the following
+ * set up:
*
- * ENTRY: via a system exception handler, thus interrupts off, VM off.
- * r3 has been saved in sprg3 and now contains a number
- * representing the exception's origins
+ * ENTRY: interrupts off, VM off, in 64-bit mode if supported
+ * Caller's r13 saved in sprg2.
+ * Caller's r11 saved in sprg3.
+ * Exception code (ie, T_SYSTEM_CALL etc) in r11.
+ * All other registers are live.
*
*/
-
- .data
- .align ALIGN
- .globl EXT(exception_entry)
-EXT(exception_entry):
- .long .L_exception_entry-EXT(ExceptionVectorsStart) /* phys addr of fn */
-
- VECTOR_SEGMENT
- .align 5
-.L_exception_entry:
+.L_exception_entry: ; WARNING: can fall through from UFT handler
/*
*
 * misses, so these stores won't take all that long. Except the first line, that is, because
* we can't do a DCBZ if the L1 D-cache is off. The rest we will skip if they are
* off also.
- *
+ *
* Note that if we are attempting to sleep (as opposed to nap or doze) all interruptions
* are ignored.
*/
LEXT(extPatch32)
- b extEntry64 ; Go do 64-bit (patched out for 32-bit)
+ b extEntry64 ; Go do 64-bit (patched to a nop if 32-bit)
mfsprg r13,0 ; Load per_proc
lwz r13,next_savearea+4(r13) ; Get the exception save area
stw r0,saver0+4(r13) ; Save register 0
lwz r25,traceMask(0) ; Get the trace mask
li r0,SAVgeneral ; Get the savearea type value
lhz r19,PP_CPU_NUMBER(r2) ; Get the logical processor number
- rlwinm r22,r11,30,0,31 ; Divide interrupt code by 2
+ rlwinm r22,r11,30,0,31 ; Divide interrupt code by 4
stb r0,SAVflags+2(r13) ; Mark valid context
addi r22,r22,10 ; Adjust code so we shift into CR5
li r23,trcWork ; Get the trace work area address
;
; Let us cut that trace entry now.
+;
+; Note that this code cuts a trace table entry for everything but the CutTrace call.
+; An identical entry is made during normal CutTrace processing. Any entry
+; format changes made must be done in both places.
;
lwz r16,ruptStamp(r2) ; Get top of time base
mfsrr1 r3 ; Load srr1
andc r11,r11,r1 ; Clear FAM bit
rlwinm. r3,r3,0,MSR_PR_BIT,MSR_PR_BIT ; Are we trapping from supervisor state?
- beq+ eEnoFAM ; From supervisor state
+ beq++ eEnoFAM ; From supervisor state
lwz r1,spcFlags(r2) ; Load spcFlags
rlwinm r1,r1,1+FamVMmodebit,30,31 ; Extract FamVMenabit and FamVMmodebit
cmpwi cr0,r1,2 ; Check FamVMena set without FamVMmode
li r0,SAVgeneral ; Get the savearea type value
lhz r19,PP_CPU_NUMBER(r2) ; Get the logical processor number
stb r0,SAVflags+2(r13) ; Mark valid context
- ori r23,r23,lo16(EXT(trcWork)) ; Get the rest
	rlwinm	r22,r11,30,0,31			; Divide interrupt code by 4
li r23,trcWork ; Get the trace work area address
addi r22,r22,10 ; Adjust code so we shift into CR5
addi r22,r20,LTR_size ; Point to the next trace entry
cmplw r22,r26 ; Do we need to wrap the trace table?
- bne+ gotTrcEntSF ; No wrap, we got us a trace entry...
+ bne++ gotTrcEntSF ; No wrap, we got us a trace entry...
mr r22,r25 ; Wrap back to start
;
; Let us cut that trace entry now.
+;
+; Note that this code cuts a trace table entry for everything but the CutTrace call.
+; An identical entry is made during normal CutTrace processing. Any entry
+; format changes made must be done in both places.
;
dcbz128 0,r20 ; Zap the trace entry
+ lwz r9,SAVflags(r13) ; Get savearea flags
+
ld r16,ruptStamp(r2) ; Get top of time base
ld r0,saver0(r13) ; Get back interrupt time R0 (we need this whether we trace or not)
std r16,LTR_timeHi(r20) ; Set the upper part of TB
ld r1,saver1(r13) ; Get back interrupt time R1
+ rlwinm r9,r9,20,16,23 ; Isolate the special flags
ld r18,saver2(r13) ; Get back interrupt time R2
std r0,LTR_r0(r20) ; Save off register 0
+ rlwimi r9,r19,0,24,31 ; Slide in the cpu number
ld r3,saver3(r13) ; Restore this one
- sth r19,LTR_cpu(r20) ; Stash the cpu number
+ sth r9,LTR_cpu(r20) ; Stash the cpu number and special flags
std r1,LTR_r1(r20) ; Save off register 1
ld r4,saver4(r13) ; Restore this one
std r18,LTR_r2(r20) ; Save off register 2
std r13,LTR_save(r20) ; Save the savearea
stw r17,LTR_dsisr(r20) ; Save the DSISR
sth r11,LTR_excpt(r20) ; Save the exception type
+#if 0
+ lwz r17,FPUowner(r2) ; (TEST/DEBUG) Get the current floating point owner
+ stw r17,LTR_rsvd0(r20) ; (TEST/DEBUG) Record the owner
+#endif
#if ESPDEBUG
dcbf 0,r20 ; Force to memory
lwz r0,saver0+4(r13) ; Get back interrupt time syscall number
mfsprg r2,0 ; Restore per_proc
- li r20,lo16(xcpTable) ; Point to the vector table (note: this must be in 1st 64k of physical memory)
+ lwz r20,lo16(xcpTable)(r11) ; Get the interrupt handler (note: xcpTable must be in 1st 32k of physical memory)
la r12,hwCounts(r2) ; Point to the exception count area
+ andis. r24,r22,hi16(SAVeat) ; Should we eat this one?
rlwinm r22,r22,SAVredriveb+1,31,31 ; Get a 1 if we are redriving
add r12,r12,r11 ; Point to the count
- lwzx r20,r20,r11 ; Get the interrupt handler
lwz r25,0(r12) ; Get the old value
lwz r23,hwRedrives(r2) ; Get the redrive count
+ crmove cr3_eq,cr0_eq ; Remember if we are ignoring
xori r24,r22,1 ; Get the NOT of the redrive
mtctr r20 ; Point to the interrupt handler
mtcrf 0x80,r0 ; Set our CR0 to the high nybble of possible syscall code
add r25,r25,r24 ; Count this one if not a redrive
- add r23,r23,r24 ; Count this one if if is a redrive
+	add	r23,r23,r22			; Count this one if it is a redrive
crandc cr0_lt,cr0_lt,cr0_gt ; See if we have R0 equal to 0b10xx...x
stw r25,0(r12) ; Store it back
stw r23,hwRedrives(r2) ; Save the redrive count
+ bne-- cr3,IgnoreRupt ; Interruption is being ignored...
bctr ; Go process the exception...
;
-; Exception vector filter table
+; Exception vector filter table (like everything in this file, must be in 1st 32KB of physical memory)
;
.align 7
.long WhoaBaby ; T_SOFT_PATCH
.long WhoaBaby ; T_MAINTENANCE
.long WhoaBaby ; T_INSTRUMENTATION
-
+ .long WhoaBaby ; T_ARCHDEP0
+ .long EatRupt ; T_HDEC
;
-; Just what the heck happened here????
+; Just what the heck happened here????
+; NB: also get here from UFT dispatch table, on bogus index
;
+
+WhoaBaby: b . ; Open the hood and wait for help
.align 5
-WhoaBaby: b . ; Open the hood and wait for help
+IgnoreRupt:
+ lwz r20,hwIgnored(r2) ; Grab the ignored interruption count
+ addi r20,r20,1 ; Count this one
+ stw r20,hwIgnored(r2) ; Save the ignored count
+ b EatRupt ; Ignore it...
+
;
LEXT(FCReturn)
cmplwi r3,T_IN_VAIN ; Was it handled?
- beq+ EatRupt ; Interrupt was handled...
+ beq++ EatRupt ; Interrupt was handled...
mr r11,r3 ; Put the rupt code into the right register
b Redrive ; Go through the filter again...
mfspr r8,scomc ; Get back the status (we just ignore it)
sync
isync
+
+ lis r8,l2FIR ; Get the L2 FIR register address
+ ori r8,r8,0x8000 ; Set to read data
+
+ sync
+
+ mtspr scomc,r8 ; Request the L2 FIR
+ mfspr r26,scomd ; Get the source
+ mfspr r8,scomc ; Get back the status (we just ignore it)
+ sync
+ isync
+
+ lis r8,l2FIRrst ; Get the L2 FIR AND mask address
+
+ sync
+
+ mtspr scomd,r9 ; Set the AND mask to 0
+ mtspr scomc,r8 ; Write the AND mask and clear conditions
+ mfspr r8,scomc ; Get back the status (we just ignore it)
+ sync
+ isync
+
+ lis r8,busFIR ; Get the Bus FIR register address
+ ori r8,r8,0x8000 ; Set to read data
+
+ sync
+
+ mtspr scomc,r8 ; Request the Bus FIR
+ mfspr r27,scomd ; Get the source
+ mfspr r8,scomc ; Get back the status (we just ignore it)
+ sync
+ isync
+
+ lis r8,busFIRrst ; Get the Bus FIR AND mask address
+
+ sync
+
+ mtspr scomd,r9 ; Set the AND mask to 0
+ mtspr scomc,r8 ; Write the AND mask and clear conditions
+ mfspr r8,scomc ; Get back the status (we just ignore it)
+ sync
+ isync
; Note: bug in early chips where scom reads are shifted right by 1. We fix that here.
; Also note that we will lose bit 63
beq++ mckNoFix ; No fix up is needed
sldi r24,r24,1 ; Shift left 1
sldi r25,r25,1 ; Shift left 1
+ sldi r26,r26,1 ; Shift left 1
+ sldi r27,r27,1 ; Shift left 1
-mckNoFix: std r24,savemisc0(r13) ; Save the MCK source in case we pass the error
- std r25,savemisc1(r13) ; Save the Core FIR in case we pass the error
+mckNoFix: std r24,savexdat0(r13) ; Save the MCK source in case we pass the error
+ std r25,savexdat1(r13) ; Save the Core FIR in case we pass the error
+ std r26,savexdat2(r13) ; Save the L2 FIR in case we pass the error
+ std r27,savexdat3(r13) ; Save the BUS FIR in case we pass the error
rlwinm. r0,r20,0,mckIFUE-32,mckIFUE-32 ; Is this some kind of uncorrectable?
bne mckUE ; Yeah...
isync
tlbiel r23 ; Locally invalidate TLB entry for iaddr
sync ; Wait for it
- b EatRupt ; All recovered...
+ b ceMck ; All recovered...
; SLB parity error. This could be software caused. We get one if there is
; more than 1 valid SLBE with a matching ESID. That one we do not want to
bne++ cr1,mckSLBclr ; Yup....
sth r3,ppInvSeg(r2) ; Store non-zero to trigger SLB reload
- bne++ EatRupt ; This was not a programming error, all recovered...
- b PassUpTrap ; Pass the software error up...
+ bne++ ceMck ; This was not a programming error, all recovered...
+ b ueMck ; Pass the software error up...
;
; Handle a load/store unit error. We need to decode the DSISR
addi r21,r21,1 ; Count this one
stw r21,0(r9) ; Stick it back
- b EatRupt ; All recovered...
+ b ceMck ; All recovered...
;
; When we come here, we are not quite sure what the error is. We need to
mckUnk: lwz r21,hwMckUnk(r2) ; Get unknown error count
addi r21,r21,1 ; Count it
stw r21,hwMckUnk(r2) ; Stuff it
- b PassUpTrap ; Go south, young man...
+ b ueMck ; Go south, young man...
;
; Hang recovery. This is just a notification so we only count.
lwz r21,hwMckHang(r2) ; Get hang recovery count
addi r21,r21,1 ; Count this one
stw r21,hwMckHang(r2) ; Stick it back
- b EatRupt ; All recovered...
+ b ceMck ; All recovered...
;
; Externally signaled MCK. No recovery for the moment, but this may be
lwz r21,hwMckHang(r2) ; Get hang recovery count
addi r21,r21,1 ; Count this one
stw r21,hwMckHang(r2) ; Stick it back
- b EatRupt ; All recovered...
+ b ceMck ; All recovered...
;
; Machine check cause is in a FIR. Suss it out here.
lwz r5,0(r19) ; Get the counter
addi r5,r5,1 ; Count it
stw r5,0(r19) ; Stuff it back
- b EatRupt ; All recovered...
+ b ceMck ; All recovered...
; General recovery for ERAT problems - handled in exception vector already
mckInvERAT: lwz r21,0(r19) ; Get the exception count spot
addi r21,r21,1 ; Count this one
stw r21,0(r19) ; Save count
- b EatRupt ; All recovered...
+ b ceMck ; All recovered...
; General hang recovery - this is a notification only, just count.
lwz r21,hwMckHang(r2) ; Get hang recovery count
addi r21,r21,1 ; Count this one
stw r21,hwMckHang(r2) ; Stick it back
- b EatRupt ; All recovered...
+ b ceMck ; All recovered...
;
mckUE: lwz r21,hwMckUE(r2) ; Get general uncorrectable error count
addi r21,r21,1 ; Count it
stw r21,hwMckUE(r2) ; Stuff it
- b PassUpTrap ; Go south, young man...
+ b ueMck ; Go south, young man...
mckhIFUE: lwz r21,hwMckIUEr(r2) ; Get I-Fetch TLB reload uncorrectable error count
addi r21,r21,1 ; Count it
stw r21,hwMckIUEr(r2) ; Stuff it
- b PassUpTrap ; Go south, young man...
+ b ueMck ; Go south, young man...
mckDUE: lwz r21,hwMckDUE(r2) ; Get deferred uncorrectable error count
addi r21,r21,1 ; Count it
cmpld r23,r8 ; Too soon?
cmpld cr1,r23,r9 ; Too late?
- cror cr0_lt,cr0_lt,cr1_gt ; Too soo or too late?
+ cror cr0_lt,cr0_lt,cr1_gt ; Too soon or too late?
ld r3,saver12(r13) ; Get the original MSR
ld r5,savelr(r13) ; Get the return address
li r4,0 ; Get fail code
- blt-- PassUpTrap ; This is a normal machine check, just pass up...
+ blt-- ueMck ; This is a normal machine check, just pass up...
	std	r5,savesrr0(r13)		; Set the return address
	std	r3,savesrr1(r13)		; Set the return MSR
std r4,saver3(r13) ; Set failure return code
- b EatRupt ; Go return from ml_probe_read_64...
+ b ceMck ; All recovered...
mckDTW: lwz r21,hwMckDTW(r2) ; Get deferred tablewalk uncorrectable error count
addi r21,r21,1 ; Count it
stw r21,hwMckDTW(r2) ; Stuff it
- b PassUpTrap ; Go south, young man...
+ b ueMck ; Go south, young man...
mckL1D: lwz r21,hwMckL1DPE(r2) ; Get data cache parity error count
addi r21,r21,1 ; Count it
stw r21,hwMckL1DPE(r2) ; Stuff it
- b PassUpTrap ; Go south, young man...
+ b ceMck ; All recovered...
mckL1T: lwz r21,hwMckL1TPE(r2) ; Get TLB parity error count
addi r21,r21,1 ; Count it
stw r21,hwMckL1TPE(r2) ; Stuff it
- b PassUpTrap ; Go south, young man...
+
+ceMck: lwz r21,mckFlags(0) ; Get the flags
+ li r0,1 ; Set the recovered flag before passing up
+ rlwinm. r21,r21,0,31,31 ; Check if we want to log recoverables
+ stw r0,savemisc3(r13) ; Set it
+ beq++ EatRupt ; No log of recoverables wanted...
+ b PassUpTrap ; Go up and log error...
+
+ueMck: li r0,0 ; Set the unrecovered flag before passing up
+ stw r0,savemisc3(r13) ; Set it
+ b PassUpTrap ; Go up and log error and probably panic
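+
+; In C terms (a sketch only): ceMck sets savemisc3 = 1 (recovered) and passes the
+; check up only when the low-order mckFlags bit asks for logging of recoverables;
+; ueMck sets savemisc3 = 0 (unrecovered) and always passes the check up.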
/*
lwz r5,SACvrswap+4(r5) ; Get real to virtual conversion
or r21,r21,r3 ; Keep the trace bits if they are on
stw r10,SAVflags(r13) ; Set the flags with the cleared redrive flag
- mr r3,r11 ; Pass the exception code in the paramter reg
+
xor r4,r13,r5 ; Pass up the virtual address of context savearea
mfsprg r29,0 ; Get the per_proc block back
rlwinm r4,r4,0,0,31 ; Clean top half of virtual savearea if 64-bit
ld r29,saver29(r31) ; Restore R29
lwz r31,pfAvailable(r30) ; Get the feature flags
- lwz r30,UAW(r30) ; Get the User Assist Word
+ ld r30,UAW(r30) ; Get the User Assist DoubleWord
mtsprg 2,r31 ; Set the feature flags
mfsprg r31,3 ; Restore R31
mtsprg 3,r30 ; Set the UAW
beq+ EatRupt ; No redrive, just exit...
+0: mftbu r2 ; Avoid using an obsolete timestamp for the redrive
+ mftb r4
+ mftbu r0
+ cmplw r0,r2
+ bne-- 0b
+
+ stw r2,SAVtime(r13)
+ stw r4,SAVtime+4(r13)
+
lwz r11,saveexception(r13) ; Restore exception code
b Redrive ; Redrive the exception...
;
. = 0x5000
- .globl EXT(lowGlo)
-EXT(lowGlo):
-
.ascii "Hagfish " ; 5000 Unique eyecatcher
.long 0 ; 5008 Zero
.long 0 ; 500C Zero cont...
- .long EXT(per_proc_info) ; 5010 pointer to per_procs
- .long 0 ; 5014 reserved
- .long 0 ; 5018 reserved
- .long 0 ; 501C reserved
- .long 0 ; 5020 reserved
- .long 0 ; 5024 reserved
- .long 0 ; 5028 reserved
- .long 0 ; 502C reserved
- .long 0 ; 5030 reserved
+ .long EXT(PerProcTable) ; 5010 pointer to per_proc_entry table
+ .long 0 ; 5014 Zero
+
+ .globl EXT(mckFlags)
+EXT(mckFlags):
+ .long 0 ; 5018 Machine check flags
+
+ .long EXT(version) ; 501C Pointer to kernel version string
+ .long 0 ; 5020 physical memory window virtual address
+ .long 0 ; 5024 physical memory window virtual address
+ .long 0 ; 5028 user memory window virtual address
+ .long 0 ; 502C user memory window virtual address
+ .long 0 ; 5030 VMM boot-args forced feature flags
.long 0 ; 5034 reserved
.long 0 ; 5038 reserved
.long 0 ; 503C reserved
.globl EXT(dgWork)
EXT(dgWork):
-
.long 0 ; 5200 dgLock
.long 0 ; 5204 dgFlags
.long 0 ; 5208 dgMisc0
.long 0 ; 5214 dgMisc3
.long 0 ; 5218 dgMisc4
.long 0 ; 521C dgMisc5
-
- .long 0 ; 5220 reserved
+
+ .globl EXT(LcksOpts)
+EXT(LcksOpts):
+ .long 0 ; 5220 lcksWork
.long 0 ; 5224 reserved
.long 0 ; 5228 reserved
.long 0 ; 522C reserved
.long 0 ; 5278 reserved
.long 0 ; 527C reserved
- .long 0 ; 5280 reserved
- .long 0 ; 5284 reserved
- .long 0 ; 5288 reserved
- .long 0 ; 528C reserved
- .long 0 ; 5290 reserved
- .long 0 ; 5294 reserved
- .long 0 ; 5298 reserved
- .long 0 ; 529C reserved
+ .globl EXT(pPcfg)
+EXT(pPcfg):
+ .long 0x80000000 | (12 << 8) | 12 ; 5280 pcfDefPcfg - 4k
+ .long 0 ; 5284 pcfLargePcfg
+ .long 0 ; 5288 Non-primary page configurations
+ .long 0 ; 528C Non-primary page configurations
+ .long 0 ; 5290 Non-primary page configurations
+ .long 0 ; 5294 Non-primary page configurations
+ .long 0 ; 5298 Non-primary page configurations
+ .long 0 ; 529C Non-primary page configurations
+
.long 0 ; 52A0 reserved
.long 0 ; 52A4 reserved
.long 0 ; 52A8 reserved
.long 0 ; 53F4 reserved
.long 0 ; 53F8 reserved
.long 0 ; 53FC reserved
-
-
+ .long 0 ; 5400 reserved
+ .long 0 ; 5404 reserved
+ .long 0 ; 5408 reserved
+ .long 0 ; 540C reserved
+ .long 0 ; 5410 reserved
+ .long 0 ; 5414 reserved
+ .long 0 ; 5418 reserved
+ .long 0 ; 541C reserved
+ .long 0 ; 5420 reserved
+ .long 0 ; 5424 reserved
+ .long 0 ; 5428 reserved
+ .long 0 ; 542C reserved
+ .long 0 ; 5430 reserved
+ .long 0 ; 5434 reserved
+ .long 0 ; 5438 reserved
+ .long 0 ; 543C reserved
+ .long 0 ; 5440 reserved
+ .long 0 ; 5444 reserved
+ .long 0 ; 5448 reserved
+ .long 0 ; 544C reserved
+ .long 0 ; 5450 reserved
+ .long 0 ; 5454 reserved
+ .long 0 ; 5458 reserved
+ .long 0 ; 545C reserved
+ .long 0 ; 5460 reserved
+ .long 0 ; 5464 reserved
+ .long 0 ; 5468 reserved
+ .long 0 ; 546C reserved
+ .long 0 ; 5470 reserved
+ .long 0 ; 5474 reserved
+ .long 0 ; 5478 reserved
+ .long 0 ; 547C reserved
;
; The "shared page" is used for low-level debugging
;