; System call (sc) handler entry.  On entry r0 holds the syscall number;
; the caller's r11/r13 are stashed in sprg3/sprg2 so both can be used as
; scratch.  A branchless cntlzw/rlwimi sequence classifies the syscall
; (0xFFFFFFF8 traps, 0x7FFx traps, 0x6004 vmm_dispatch, 0x80000000
; CutTrace) into a table index, fetches the handler address from scTable,
; and dispatches through ctr with per_proc preloaded into r11.
.L_handlerC00:
mtsprg 3,r11 ; Save R11
mtsprg 2,r13 ; Save R13
- rlwinm r11,r0,0,0xFFFFFFF8 ; mask off low 3 bits of syscall number
- xori r13,r11,0x7FF0 ; start to check for the 0x7FFx traps
- addi r11,r11,8 ; make a 0 iff this is a 0xFFFFFFF8 trap
- cntlzw r13,r13 ; set bit 0x20 iff a 0x7FFx trap
- cntlzw r11,r11 ; set bit 0x20 iff a 0xFFFFFFF8 trap
+ rlwinm r11,r0,0,0xFFFFFFF8 ; mask off low 3 bits of syscall number
+ xori r13,r11,0x7FF0 ; start to check for the 0x7FFx traps
+ addi r11,r11,8 ; make a 0 iff this is a 0xFFFFFFF8 trap
+ cntlzw r13,r13 ; set bit 0x20 iff a 0x7FFx trap
+ cntlzw r11,r11 ; set bit 0x20 iff a 0xFFFFFFF8 trap
xoris r0,r0,0x8000 ; Flip bit to make 0 iff 0x80000000
- rlwimi r11,r13,31,0x10 ; move 0x7FFx bit into position
+ rlwimi r11,r13,31,0x10 ; move 0x7FFx bit into position
cntlzw r13,r0 ; Set bit 0x20 iff 0x80000000
xoris r0,r0,0x8000 ; Flip bit to restore R0
rlwimi r11,r13,2,0x80 ; Set bit 0x80 iff CutTrace
- xori r13,r0,0x6004 ; start to check for 0x6004
- rlwimi r11,r0,1,0xE ; move in low 3 bits of syscall number
- cntlzw r13,r13 ; set bit 0x20 iff 0x6004
- rlwinm r11,r11,0,0,30 ; clear out bit 31
- rlwimi r11,r13,1,0x40 ; move 0x6004 bit into position
- lhz r11,lo16(scTable)(r11) ; get branch address from sc table
- mfctr r13 ; save caller's ctr in r13
- mtctr r11 ; set up branch to syscall handler
- mfsprg r11,0 ; get per_proc, which most UFTs use
- bctr ; dispatch (r11 in sprg3, r13 in sprg2, ctr in r13, per_proc in r11)
+ xori r13,r0,0x6004 ; start to check for 0x6004
+ rlwimi r11,r0,1,0xE ; move in low 3 bits of syscall number
+ cntlzw r13,r13 ; set bit 0x20 iff 0x6004
+ rlwinm r11,r11,0,0,30 ; clear out bit 31
+ rlwimi r11,r13,1,0x40 ; move 0x6004 bit into position
+ lhz r11,lo16(scTable)(r11) ; get branch address from sc table
+ mfctr r13 ; save caller's ctr in r13
+ mtctr r11 ; set up branch to syscall handler
+ mfsprg r11,0 ; get per_proc, which most UFTs use
+ bctr ; dispatch (r11 in sprg3, r13 in sprg2, ctr in r13, per_proc in r11)
/*
 * Trace - generated by single stepping
 * sprg3 = caller's r11
 */
; uftVMM: entered with r11 = per_proc and the caller's ctr in r13.
; Handles only the UFT subset of vmm_dispatch (selector in r3, range
; kvmmResumeGuest..kvmmSetGuestRegister) and only while running a VM in
; FAM mode; every other case is routed back to the normal syscall path.
-; Handle "vmm_dispatch" (0x6004), of which only some selectors are UFTs.
+; Handle "vmm_dispatch" (0x6004), of which only some selectors are UFTs.
uftVMM:
- mtctr r13 ; restore caller's ctr
- lwz r11,spcFlags(r11) ; get the special flags word from per_proc
- mfcr r13 ; save caller's entire cr (we use all fields below)
+ mtctr r13 ; restore caller's ctr
+ lwz r11,spcFlags(r11) ; get the special flags word from per_proc
+ mfcr r13 ; save caller's entire cr (we use all fields below)
rlwinm r11,r11,16,16,31 ; Extract spcFlags upper bits
andi. r11,r11,hi16(runningVM|FamVMena|FamVMmode) ; isolate the FAM/VM state bits
cmpwi cr0,r11,hi16(runningVM|FamVMena|FamVMmode) ; Test in VM FAM
- bne-- uftNormal80 ; not eligible for FAM UFTs
+ bne-- uftNormal80 ; not eligible for FAM UFTs
cmpwi cr5,r3,kvmmResumeGuest ; Compare r3 with kvmmResumeGuest
cmpwi cr2,r3,kvmmSetGuestRegister ; Compare r3 with kvmmSetGuestRegister
cror cr1_eq,cr5_lt,cr2_gt ; Set true if out of VMM Fast syscall range
- bt-- cr1_eq,uftNormalFF ; Exit if out of range (the others are not UFTs)
+ bt-- cr1_eq,uftNormalFF ; Exit if out of range (the others are not UFTs)
b EXT(vmm_ufp) ; handle UFT range of vmm_dispatch syscall
; uftIsPreemptiveTask / uftIsPreemptiveTaskEnv (blue box syscalls -1/-2):
; entered with r11 = per_proc and the caller's ctr in r13.  Requires the
; bbNoMachSC flag; copies the bbPreemptive flag into the caller's cr0_eq,
; and for syscall -2 (IsPreemptiveTaskEnv) additionally returns the
; shadowed taskEnv in r0.  Exits through uftRestoreThenRFI.
-
-; Handle blue box UFTs (syscalls -1 and -2).
+
+; Handle blue box UFTs (syscalls -1 and -2).
uftIsPreemptiveTask:
uftIsPreemptiveTaskEnv:
- mtctr r13 ; restore caller's ctr
- lwz r11,spcFlags(r11) ; get the special flags word from per_proc
- mfcr r13,0x80 ; save caller's cr0 so we can use it
- andi. r11,r11,bbNoMachSC|bbPreemptive ; Clear what we do not need
+ mtctr r13 ; restore caller's ctr
+ lwz r11,spcFlags(r11) ; get the special flags word from per_proc
+ mfcr r13,0x80 ; save caller's cr0 so we can use it
+ andi. r11,r11,bbNoMachSC|bbPreemptive ; Clear what we do not need
cmplwi r11,bbNoMachSC ; See if we are trapping syscalls
- blt-- uftNormal80 ; No...
- cmpwi r0,-2 ; is this call IsPreemptiveTaskEnv?
+ blt-- uftNormal80 ; No...
+ cmpwi r0,-2 ; is this call IsPreemptiveTaskEnv?
rlwimi r13,r11,bbPreemptivebit-cr0_eq,cr0_eq,cr0_eq ; Copy preemptive task flag into user cr0_eq
mfsprg r11,0 ; Get the per proc once more
- bne++ uftRestoreThenRFI ; do not load r0 if IsPreemptiveTask
+ bne++ uftRestoreThenRFI ; do not load r0 if IsPreemptiveTask
lwz r0,ppbbTaskEnv(r11) ; Get the shadowed taskEnv (only difference)
- b uftRestoreThenRFI ; restore modified cr0 and return
+ b uftRestoreThenRFI ; restore modified cr0 and return
; uftThreadInfo (0x7FF2): return the user assist word (UAW) in r3.
; The 64-bit ld below is patched to a nop on 32-bit processors, leaving
; the result of the preceding 32-bit lwz in r3.
-; Handle "Thread Info" UFT (0x7FF2)
+; Handle "Thread Info" UFT (0x7FF2)
- .globl EXT(uft_uaw_nop_if_32bit)
+ .globl EXT(uft_uaw_nop_if_32bit)
uftThreadInfo:
- lwz r3,UAW+4(r11) ; get user assist word, assuming a 32-bit processor
+ lwz r3,UAW+4(r11) ; get user assist word, assuming a 32-bit processor
LEXT(uft_uaw_nop_if_32bit)
- ld r3,UAW(r11) ; get the whole doubleword if 64-bit (patched to nop if 32-bit)
- mtctr r13 ; restore caller's ctr
- b uftRFI ; done
+ ld r3,UAW(r11) ; get the whole doubleword if 64-bit (patched to nop if 32-bit)
+ mtctr r13 ; restore caller's ctr
+ b uftRFI ; done
; uftFacilityStatus (0x7FF3): return the per_proc spcFlags word in r3.
-; Handle "Facility Status" UFT (0x7FF3)
+; Handle "Facility Status" UFT (0x7FF3)
uftFacilityStatus:
- lwz r3,spcFlags(r11) ; get "special flags" word from per_proc
- mtctr r13 ; restore caller's ctr
- b uftRFI ; done
+ lwz r3,spcFlags(r11) ; get "special flags" word from per_proc
+ mtctr r13 ; restore caller's ctr
+ b uftRFI ; done
; uftLoadMSR (0x7FF4): set SRR1 (the MSR restored on rfi) from r3, but
; only if the interrupted context was privileged (MSR_PR clear); a
; user-mode caller is sent to the normal syscall path instead.  Falls
; through into the common UFT return sequence below.
-; Handle "Load MSR" UFT (0x7FF4). This is not used on 64-bit processors, though it would work.
+; Handle "Load MSR" UFT (0x7FF4). This is not used on 64-bit processors, though it would work.
uftLoadMSR:
- mfsrr1 r11 ; get caller's MSR
- mtctr r13 ; restore caller's ctr
- mfcr r13,0x80 ; save caller's cr0 so we can test PR
- rlwinm. r11,r11,0,MSR_PR_BIT,MSR_PR_BIT ; really in the kernel?
- bne- uftNormal80 ; do not permit from user mode
- mfsprg r11,0 ; restore per_proc
+ mfsrr1 r11 ; get caller's MSR
+ mtctr r13 ; restore caller's ctr
+ mfcr r13,0x80 ; save caller's cr0 so we can test PR
+ rlwinm. r11,r11,0,MSR_PR_BIT,MSR_PR_BIT ; really in the kernel?
+ bne- uftNormal80 ; do not permit from user mode
+ mfsprg r11,0 ; restore per_proc
mtsrr1 r3 ; Set new MSR
; Common UFT exit path.  uft_nop_if_32bit is patched at boot: on 32-bit
; processors the branch to the 64-bit exit (uftX64) becomes a nop and
; execution falls into the 32-bit path at uftX32.
-; Return to caller after UFT. When called:
-; r11 = per_proc ptr
-; r13 = callers cr0 in upper nibble (if uftRestoreThenRFI called)
-; sprg2 = callers r13
-; sprg3 = callers r11
+; Return to caller after UFT. When called:
+; r11 = per_proc ptr
+; r13 = callers cr0 in upper nibble (if uftRestoreThenRFI called)
+; sprg2 = callers r13
+; sprg3 = callers r11
-uftRestoreThenRFI: ; WARNING: can drop down to here
- mtcrf 0x80,r13 ; restore caller's cr0
+uftRestoreThenRFI: ; WARNING: can drop down to here
+ mtcrf 0x80,r13 ; restore caller's cr0
uftRFI:
- .globl EXT(uft_nop_if_32bit)
+ .globl EXT(uft_nop_if_32bit)
LEXT(uft_nop_if_32bit)
- b uftX64 ; patched to NOP if 32-bit processor
+ b uftX64 ; patched to NOP if 32-bit processor
uftX32: lwz r11,pfAvailable(r11) ; Get the feature flags
mfsprg r13,2 ; Restore R13
; Not a UFT: restore the caller's condition register state (all fields
; at uftNormalFF, cr0 only at uftNormal80, ctr at uftNormalSyscall), set
; the interrupt code to T_SYSTEM_CALL|T_FAM, and fall through to the
; full system call path.
-; Handle a system call that is not a UFT and which thus goes upstairs.
+; Handle a system call that is not a UFT and which thus goes upstairs.
-uftNormalFF: ; here with entire cr in r13
- mtcr r13 ; restore all 8 fields
+uftNormalFF: ; here with entire cr in r13
+ mtcr r13 ; restore all 8 fields
b uftNormalSyscall1 ; Join common...
-
-uftNormal80: ; here with callers cr0 in r13
- mtcrf 0x80,r13 ; restore cr0
+
+uftNormal80: ; here with callers cr0 in r13
+ mtcrf 0x80,r13 ; restore cr0
b uftNormalSyscall1 ; Join common...
-
-uftNormalSyscall: ; r13 = callers ctr
- mtctr r13 ; restore ctr
+
+uftNormalSyscall: ; r13 = callers ctr
+ mtctr r13 ; restore ctr
uftNormalSyscall1:
- li r11,T_SYSTEM_CALL|T_FAM ; this is a system call (and fall through)
+ li r11,T_SYSTEM_CALL|T_FAM ; this is a system call (and fall through)
/*<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>*/
* misses, so these stores won't take all that long. Except the first line that is because
* we can't do a DCBZ if the L1 D-cache is off. The rest we will skip if they are
* off also.
- *
+ *
* Note that if we are attempting to sleep (as opposed to nap or doze) all interruptions
* are ignored.
*/
; NOTE(review): mid-routine fragment — the start of this save/trace path
; is not visible in this chunk.  It marks the savearea valid and derives
; a trace-enable shift count from the interrupt code in r11.
lwz r25,traceMask(0) ; Get the trace mask
li r0,SAVgeneral ; Get the savearea type value
lhz r19,PP_CPU_NUMBER(r2) ; Get the logical processor number
- rlwinm r22,r11,30,0,31 ; Divide interrupt code by 4
+ rlwinm r22,r11,30,0,31 ; Divide interrupt code by 4 (rotate right 2)
stb r0,SAVflags+2(r13) ; Mark valid context
addi r22,r22,10 ; Adjust code so we shift into CR5
li r23,trcWork ; Get the trace work area address
; Low-memory global data area (fragment); each word's trailing comment
; gives its fixed offset within the area.
.long 0 ; 5028 user memory window virtual address
.long 0 ; 502C user memory window virtual address
.long 0 ; 5030 VMM boot-args forced feature flags
- .long 0 ; 5034 reserved
- .long 0 ; 5038 reserved
+
+ .globl EXT(maxDec)
+EXT(maxDec):
+ .long 0x7FFFFFFF ; 5034 maximum decrementer value
+
+
+ .globl EXT(pmsCtlp)
+EXT(pmsCtlp):
+ .long 0 ; 5038 Pointer to power management stepper control
+
.long 0 ; 503C reserved
.long 0 ; 5040 reserved
.long 0 ; 5044 reserved