- cmpwi r0,0 ; any live VRs?
- mtspr VRSave,r0
- beq+ LRestoreFPRs
- lvx v20,0,r6
- li r7,16*1
- lvx v21,r7,r6
- li r7,16*2
- lvx v22,r7,r6
- li r7,16*3
- lvx v23,r7,r6
- li r7,16*4
- lvx v24,r7,r6
- li r7,16*5
- lvx v25,r7,r6
- li r7,16*6
- lvx v26,r7,r6
- li r7,16*7
- lvx v27,r7,r6
- li r7,16*8
- lvx v28,r7,r6
- li r7,16*9
- lvx v29,r7,r6
- li r7,16*10
- lvx v30,r7,r6
- li r7,16*11
- lvx v31,r7,r6
- b LRestoreFPRs ; skip zeroing VRSave
-
- ; Zero VRSave iff Altivec is supported, but VRs were not in use
- ; at setjmp time. This covers the case where VRs are first used after
- ; the setjmp but before the longjmp, and where VRSave is nonzero at
- ; the longjmp. We need to zero it now, or it will always remain
- ; nonzero since they are sticky bits.
-
-LZeroVRSave:
-#if defined(__DYNAMIC__)
- bcl 20,31,1f
-1: mflr r9 ; get our address
- addis r6,r9,ha16(L__cpu_has_altivec$non_lazy_ptr - 1b)
- lwz r7,lo16(L__cpu_has_altivec$non_lazy_ptr - 1b)(r6)
- lwz r7,0(r7) ; load the flag
-#else
- lis r7, ha16(__cpu_has_altivec)
- lwz r7, lo16(__cpu_has_altivec)(r7)
-#endif
- cmpwi r7,0
- li r8,0
- beq LRestoreFPRs ; no Altivec, so skip
- mtspr VRSave,r8
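+ ; On entry here (assumptions based on the surrounding routine, which
+ ; is outside this hunk): cr2 holds the result of an earlier AltiVec
+ ; availability test, r0 holds the VRSAVE value recorded at setjmp
+ ; time, and r6 points at the saved vector registers in the jmp_buf.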
+ beq-- cr2,LRestoreFPRs ; AltiVec not available, so skip
+ cmpwi r0,0 ; any live VRs?
+ mtspr VRSave,r0 ; update VRSAVE whether 0 or not
+ beq++ LRestoreFPRs ; VRSAVE is 0, so no VRs to reload
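+ ; Reload the non-volatile vector registers v20-v31 from twelve
+ ; consecutive 16-byte slots starting at r6. lvx has only an indexed
+ ; form (no displacement field), so each offset is materialized in r7.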
+ lvx v20,0,r6
+ li r7,16*1
+ lvx v21,r7,r6
+ li r7,16*2
+ lvx v22,r7,r6
+ li r7,16*3
+ lvx v23,r7,r6
+ li r7,16*4
+ lvx v24,r7,r6
+ li r7,16*5
+ lvx v25,r7,r6
+ li r7,16*6
+ lvx v26,r7,r6
+ li r7,16*7
+ lvx v27,r7,r6
+ li r7,16*8
+ lvx v28,r7,r6
+ li r7,16*9
+ lvx v29,r7,r6
+ li r7,16*10
+ lvx v30,r7,r6
+ li r7,16*11
+ lvx v31,r7,r6
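+ ; fall through to LRestoreFPRs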