2 * Copyright (C) 2013 Cisco Systems, Inc. All rights reserved.
3 * Copyright (C) 2009-2011 STMicroelectronics. All rights reserved.
4 * Copyright (C) 2008 Apple Inc. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
16 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
19 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
20 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
23 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #ifndef MacroAssemblerSH4_h
29 #define MacroAssemblerSH4_h
31 #if ENABLE(ASSEMBLER) && CPU(SH4)
33 #include "SH4Assembler.h"
34 #include "AbstractMacroAssembler.h"
35 #include <wtf/Assertions.h>
39 class MacroAssemblerSH4
: public AbstractMacroAssembler
<SH4Assembler
> {
41 typedef SH4Assembler::FPRegisterID FPRegisterID
;
43 static const Scale ScalePtr
= TimesFour
;
44 static const FPRegisterID fscratch
= SH4Registers::dr10
;
45 static const RegisterID stackPointerRegister
= SH4Registers::sp
;
46 static const RegisterID framePointerRegister
= SH4Registers::fp
;
47 static const RegisterID linkRegister
= SH4Registers::pr
;
48 static const RegisterID scratchReg3
= SH4Registers::r13
;
50 static const int MaximumCompactPtrAlignedAddressOffset
= 60;
52 static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value
)
54 return (value
>= 0) && (value
<= MaximumCompactPtrAlignedAddressOffset
) && (!(value
& 3));
57 enum RelationalCondition
{
58 Equal
= SH4Assembler::EQ
,
59 NotEqual
= SH4Assembler::NE
,
60 Above
= SH4Assembler::HI
,
61 AboveOrEqual
= SH4Assembler::HS
,
62 Below
= SH4Assembler::LI
,
63 BelowOrEqual
= SH4Assembler::LS
,
64 GreaterThan
= SH4Assembler::GT
,
65 GreaterThanOrEqual
= SH4Assembler::GE
,
66 LessThan
= SH4Assembler::LT
,
67 LessThanOrEqual
= SH4Assembler::LE
70 enum ResultCondition
{
71 Overflow
= SH4Assembler::OF
,
72 Signed
= SH4Assembler::SI
,
73 PositiveOrZero
= SH4Assembler::NS
,
74 Zero
= SH4Assembler::EQ
,
75 NonZero
= SH4Assembler::NE
78 enum DoubleCondition
{
79 // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
80 DoubleEqual
= SH4Assembler::EQ
,
81 DoubleNotEqual
= SH4Assembler::NE
,
82 DoubleGreaterThan
= SH4Assembler::GT
,
83 DoubleGreaterThanOrEqual
= SH4Assembler::GE
,
84 DoubleLessThan
= SH4Assembler::LT
,
85 DoubleLessThanOrEqual
= SH4Assembler::LE
,
86 // If either operand is NaN, these conditions always evaluate to true.
87 DoubleEqualOrUnordered
= SH4Assembler::EQU
,
88 DoubleNotEqualOrUnordered
= SH4Assembler::NEU
,
89 DoubleGreaterThanOrUnordered
= SH4Assembler::GTU
,
90 DoubleGreaterThanOrEqualOrUnordered
= SH4Assembler::GEU
,
91 DoubleLessThanOrUnordered
= SH4Assembler::LTU
,
92 DoubleLessThanOrEqualOrUnordered
= SH4Assembler::LEU
,
95 RegisterID
claimScratch()
97 return m_assembler
.claimScratch();
100 void releaseScratch(RegisterID reg
)
102 m_assembler
.releaseScratch(reg
);
105 static RelationalCondition
invert(RelationalCondition cond
)
121 return LessThanOrEqual
;
122 case GreaterThanOrEqual
:
125 return GreaterThanOrEqual
;
126 case LessThanOrEqual
:
129 RELEASE_ASSERT_NOT_REACHED();
133 // Integer arithmetic operations
135 void add32(RegisterID src
, RegisterID dest
)
137 m_assembler
.addlRegReg(src
, dest
);
140 void add32(RegisterID src1
, RegisterID src2
, RegisterID dest
)
150 void add32(TrustedImm32 imm
, RegisterID dest
)
155 if (m_assembler
.isImmediate(imm
.m_value
)) {
156 m_assembler
.addlImm8r(imm
.m_value
, dest
);
160 RegisterID scr
= claimScratch();
161 m_assembler
.loadConstant(imm
.m_value
, scr
);
162 m_assembler
.addlRegReg(scr
, dest
);
166 void add32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
172 void add32(TrustedImm32 imm
, Address address
)
177 RegisterID scr
= claimScratch();
178 load32(address
, scr
);
180 store32(scr
, address
);
184 void add32(Address src
, RegisterID dest
)
186 RegisterID scr
= claimScratch();
188 m_assembler
.addlRegReg(scr
, dest
);
192 void add32(AbsoluteAddress src
, RegisterID dest
)
194 RegisterID scr
= claimScratch();
195 load32(src
.m_ptr
, scr
);
196 m_assembler
.addlRegReg(scr
, dest
);
200 void and32(RegisterID src
, RegisterID dest
)
202 m_assembler
.andlRegReg(src
, dest
);
205 void and32(RegisterID src1
, RegisterID src2
, RegisterID dest
)
215 void and32(Address src
, RegisterID dest
)
217 RegisterID scr
= claimScratch();
223 void and32(TrustedImm32 imm
, RegisterID dest
)
226 m_assembler
.movImm8(0, dest
);
230 if ((imm
.m_value
<= 255) && (imm
.m_value
>= 0) && (dest
== SH4Registers::r0
)) {
231 m_assembler
.andlImm8r(imm
.m_value
, dest
);
235 RegisterID scr
= claimScratch();
236 m_assembler
.loadConstant(imm
.m_value
, scr
);
237 m_assembler
.andlRegReg(scr
, dest
);
241 void and32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
252 void lshift32(RegisterID shiftamount
, RegisterID dest
)
254 RegisterID shiftTmp
= claimScratch();
255 m_assembler
.loadConstant(0x1f, shiftTmp
);
256 m_assembler
.andlRegReg(shiftamount
, shiftTmp
);
257 m_assembler
.shldRegReg(dest
, shiftTmp
);
258 releaseScratch(shiftTmp
);
261 void lshift32(RegisterID src
, RegisterID shiftAmount
, RegisterID dest
)
264 lshift32(shiftAmount
, dest
);
267 void lshift32(TrustedImm32 imm
, RegisterID dest
)
269 int immMasked
= imm
.m_value
& 0x1f;
273 if ((immMasked
== 1) || (immMasked
== 2) || (immMasked
== 8) || (immMasked
== 16)) {
274 m_assembler
.shllImm8r(immMasked
, dest
);
278 RegisterID shiftTmp
= claimScratch();
279 m_assembler
.loadConstant(immMasked
, shiftTmp
);
280 m_assembler
.shldRegReg(dest
, shiftTmp
);
281 releaseScratch(shiftTmp
);
284 void lshift32(RegisterID src
, TrustedImm32 shiftamount
, RegisterID dest
)
287 lshift32(shiftamount
, dest
);
290 void mul32(RegisterID src
, RegisterID dest
)
292 mul32(src
, dest
, dest
);
295 void mul32(RegisterID src1
, RegisterID src2
, RegisterID dest
)
297 m_assembler
.imullRegReg(src1
, src2
);
298 m_assembler
.stsmacl(dest
);
301 void mul32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
304 RegisterID immval
= claimScratch();
307 releaseScratch(immval
);
314 void or32(RegisterID src
, RegisterID dest
)
316 m_assembler
.orlRegReg(src
, dest
);
319 void or32(TrustedImm32 imm
, RegisterID dest
)
321 if ((imm
.m_value
<= 255) && (imm
.m_value
>= 0) && (dest
== SH4Registers::r0
)) {
322 m_assembler
.orlImm8r(imm
.m_value
, dest
);
326 RegisterID scr
= claimScratch();
327 m_assembler
.loadConstant(imm
.m_value
, scr
);
328 m_assembler
.orlRegReg(scr
, dest
);
332 void or32(RegisterID op1
, RegisterID op2
, RegisterID dest
)
336 else if (op1
== dest
)
344 void or32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
355 void or32(RegisterID src
, AbsoluteAddress address
)
357 RegisterID destptr
= claimScratch();
358 move(TrustedImmPtr(address
.m_ptr
), destptr
);
359 RegisterID destval
= claimScratch();
360 m_assembler
.movlMemReg(destptr
, destval
);
361 m_assembler
.orlRegReg(src
, destval
);
362 m_assembler
.movlRegMem(destval
, destptr
);
363 releaseScratch(destval
);
364 releaseScratch(destptr
);
367 void xor32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
378 void rshift32(RegisterID shiftamount
, RegisterID dest
)
380 RegisterID shiftTmp
= claimScratch();
381 m_assembler
.loadConstant(0x1f, shiftTmp
);
382 m_assembler
.andlRegReg(shiftamount
, shiftTmp
);
383 m_assembler
.neg(shiftTmp
, shiftTmp
);
384 m_assembler
.shadRegReg(dest
, shiftTmp
);
385 releaseScratch(shiftTmp
);
388 void rshift32(RegisterID src
, RegisterID shiftAmount
, RegisterID dest
)
391 rshift32(shiftAmount
, dest
);
394 void rshift32(TrustedImm32 imm
, RegisterID dest
)
396 int immMasked
= imm
.m_value
& 0x1f;
400 if (immMasked
== 1) {
401 m_assembler
.sharImm8r(immMasked
, dest
);
405 RegisterID shiftTmp
= claimScratch();
406 m_assembler
.loadConstant(-immMasked
, shiftTmp
);
407 m_assembler
.shadRegReg(dest
, shiftTmp
);
408 releaseScratch(shiftTmp
);
411 void rshift32(RegisterID src
, TrustedImm32 imm
, RegisterID dest
)
417 void sub32(RegisterID src
, RegisterID dest
)
419 m_assembler
.sublRegReg(src
, dest
);
422 void sub32(TrustedImm32 imm
, AbsoluteAddress address
)
427 RegisterID result
= claimScratch();
428 RegisterID scratchReg
= claimScratch();
430 move(TrustedImmPtr(address
.m_ptr
), scratchReg
);
431 m_assembler
.movlMemReg(scratchReg
, result
);
433 if (m_assembler
.isImmediate(-imm
.m_value
))
434 m_assembler
.addlImm8r(-imm
.m_value
, result
);
436 m_assembler
.loadConstant(imm
.m_value
, scratchReg3
);
437 m_assembler
.sublRegReg(scratchReg3
, result
);
440 store32(result
, scratchReg
);
441 releaseScratch(result
);
442 releaseScratch(scratchReg
);
445 void sub32(TrustedImm32 imm
, Address address
)
447 add32(TrustedImm32(-imm
.m_value
), address
);
450 void add32(TrustedImm32 imm
, AbsoluteAddress address
)
455 RegisterID result
= claimScratch();
456 RegisterID scratchReg
= claimScratch();
458 move(TrustedImmPtr(address
.m_ptr
), scratchReg
);
459 m_assembler
.movlMemReg(scratchReg
, result
);
461 if (m_assembler
.isImmediate(imm
.m_value
))
462 m_assembler
.addlImm8r(imm
.m_value
, result
);
464 m_assembler
.loadConstant(imm
.m_value
, scratchReg3
);
465 m_assembler
.addlRegReg(scratchReg3
, result
);
468 store32(result
, scratchReg
);
469 releaseScratch(result
);
470 releaseScratch(scratchReg
);
473 void add64(TrustedImm32 imm
, AbsoluteAddress address
)
475 RegisterID scr1
= claimScratch();
476 RegisterID scr2
= claimScratch();
478 // Add 32-bit LSB first.
479 move(TrustedImmPtr(address
.m_ptr
), scratchReg3
);
480 m_assembler
.movlMemReg(scratchReg3
, scr1
); // scr1 = 32-bit LSB of int64 @ address
481 m_assembler
.loadConstant(imm
.m_value
, scr2
);
483 m_assembler
.addclRegReg(scr1
, scr2
);
484 m_assembler
.movlRegMem(scr2
, scratchReg3
); // Update address with 32-bit LSB result.
486 // Then add 32-bit MSB.
487 m_assembler
.addlImm8r(4, scratchReg3
);
488 m_assembler
.movlMemReg(scratchReg3
, scr1
); // scr1 = 32-bit MSB of int64 @ address
489 m_assembler
.movt(scr2
);
491 m_assembler
.addlImm8r(-1, scr2
); // Sign extend imm value if needed.
492 m_assembler
.addvlRegReg(scr2
, scr1
);
493 m_assembler
.movlRegMem(scr1
, scratchReg3
); // Update (address + 4) with 32-bit MSB result.
495 releaseScratch(scr2
);
496 releaseScratch(scr1
);
499 void sub32(TrustedImm32 imm
, RegisterID dest
)
504 if (m_assembler
.isImmediate(-imm
.m_value
)) {
505 m_assembler
.addlImm8r(-imm
.m_value
, dest
);
509 RegisterID scr
= claimScratch();
510 m_assembler
.loadConstant(imm
.m_value
, scr
);
511 m_assembler
.sublRegReg(scr
, dest
);
515 void sub32(Address src
, RegisterID dest
)
517 RegisterID scr
= claimScratch();
519 m_assembler
.sublRegReg(scr
, dest
);
523 void xor32(RegisterID src
, RegisterID dest
)
525 m_assembler
.xorlRegReg(src
, dest
);
528 void xor32(RegisterID src1
, RegisterID src2
, RegisterID dest
)
538 void xor32(TrustedImm32 imm
, RegisterID srcDest
)
540 if (imm
.m_value
== -1) {
541 m_assembler
.notlReg(srcDest
, srcDest
);
545 if ((srcDest
!= SH4Registers::r0
) || (imm
.m_value
> 255) || (imm
.m_value
< 0)) {
546 RegisterID scr
= claimScratch();
547 m_assembler
.loadConstant(imm
.m_value
, scr
);
548 m_assembler
.xorlRegReg(scr
, srcDest
);
553 m_assembler
.xorlImm8r(imm
.m_value
, srcDest
);
556 void compare32(int imm
, RegisterID dst
, RelationalCondition cond
)
558 if (((cond
== Equal
) || (cond
== NotEqual
)) && (dst
== SH4Registers::r0
) && m_assembler
.isImmediate(imm
)) {
559 m_assembler
.cmpEqImmR0(imm
, dst
);
563 if (((cond
== Equal
) || (cond
== NotEqual
)) && !imm
) {
564 m_assembler
.testlRegReg(dst
, dst
);
568 RegisterID scr
= claimScratch();
569 m_assembler
.loadConstant(imm
, scr
);
570 m_assembler
.cmplRegReg(scr
, dst
, SH4Condition(cond
));
574 void compare32(int offset
, RegisterID base
, RegisterID left
, RelationalCondition cond
)
576 RegisterID scr
= claimScratch();
578 m_assembler
.movlMemReg(base
, scr
);
579 m_assembler
.cmplRegReg(scr
, left
, SH4Condition(cond
));
584 if ((offset
< 0) || (offset
>= 64)) {
585 m_assembler
.loadConstant(offset
, scr
);
586 m_assembler
.addlRegReg(base
, scr
);
587 m_assembler
.movlMemReg(scr
, scr
);
588 m_assembler
.cmplRegReg(scr
, left
, SH4Condition(cond
));
593 m_assembler
.movlMemReg(offset
>> 2, base
, scr
);
594 m_assembler
.cmplRegReg(scr
, left
, SH4Condition(cond
));
598 void testImm(int imm
, int offset
, RegisterID base
)
600 RegisterID scr
= claimScratch();
601 load32(base
, offset
, scr
);
603 RegisterID scr1
= claimScratch();
604 move(TrustedImm32(imm
), scr1
);
606 m_assembler
.testlRegReg(scr
, scr1
);
608 releaseScratch(scr1
);
611 void testlImm(int imm
, RegisterID dst
)
613 if ((dst
== SH4Registers::r0
) && (imm
<= 255) && (imm
>= 0)) {
614 m_assembler
.testlImm8r(imm
, dst
);
618 RegisterID scr
= claimScratch();
619 m_assembler
.loadConstant(imm
, scr
);
620 m_assembler
.testlRegReg(scr
, dst
);
624 void compare32(RegisterID right
, int offset
, RegisterID base
, RelationalCondition cond
)
627 RegisterID scr
= claimScratch();
628 m_assembler
.movlMemReg(base
, scr
);
629 m_assembler
.cmplRegReg(right
, scr
, SH4Condition(cond
));
634 if ((offset
< 0) || (offset
>= 64)) {
635 RegisterID scr
= claimScratch();
636 m_assembler
.loadConstant(offset
, scr
);
637 m_assembler
.addlRegReg(base
, scr
);
638 m_assembler
.movlMemReg(scr
, scr
);
639 m_assembler
.cmplRegReg(right
, scr
, SH4Condition(cond
));
644 RegisterID scr
= claimScratch();
645 m_assembler
.movlMemReg(offset
>> 2, base
, scr
);
646 m_assembler
.cmplRegReg(right
, scr
, SH4Condition(cond
));
650 void compare32(int imm
, int offset
, RegisterID base
, RelationalCondition cond
)
652 RegisterID scr
= claimScratch();
653 load32(base
, offset
, scr
);
655 RegisterID scr1
= claimScratch();
656 move(TrustedImm32(imm
), scr1
);
658 m_assembler
.cmplRegReg(scr1
, scr
, SH4Condition(cond
));
660 releaseScratch(scr1
);
664 // Memory access operation
666 ALWAYS_INLINE
void loadEffectiveAddress(BaseIndex address
, RegisterID dest
, int extraoffset
= 0)
668 if (dest
== address
.base
) {
669 RegisterID scaledIndex
= claimScratch();
670 move(address
.index
, scaledIndex
);
671 lshift32(TrustedImm32(address
.scale
), scaledIndex
);
672 add32(scaledIndex
, dest
);
673 releaseScratch(scaledIndex
);
675 move(address
.index
, dest
);
676 lshift32(TrustedImm32(address
.scale
), dest
);
677 add32(address
.base
, dest
);
680 add32(TrustedImm32(address
.offset
+ extraoffset
), dest
);
683 void load32(ImplicitAddress address
, RegisterID dest
)
685 load32(address
.base
, address
.offset
, dest
);
688 void load8(ImplicitAddress address
, RegisterID dest
)
690 load8(address
.base
, address
.offset
, dest
);
693 void load8(BaseIndex address
, RegisterID dest
)
695 RegisterID scr
= claimScratch();
696 move(address
.index
, scr
);
697 lshift32(TrustedImm32(address
.scale
), scr
);
698 add32(address
.base
, scr
);
699 load8(scr
, address
.offset
, dest
);
703 void load8(AbsoluteAddress address
, RegisterID dest
)
705 move(TrustedImmPtr(address
.m_ptr
), dest
);
706 m_assembler
.movbMemReg(dest
, dest
);
707 m_assembler
.extub(dest
, dest
);
710 void load8(const void* address
, RegisterID dest
)
712 load8(AbsoluteAddress(address
), dest
);
715 void load8PostInc(RegisterID base
, RegisterID dest
)
717 m_assembler
.movbMemRegIn(base
, dest
);
718 m_assembler
.extub(dest
, dest
);
721 void load8Signed(BaseIndex address
, RegisterID dest
)
723 RegisterID scr
= claimScratch();
724 move(address
.index
, scr
);
725 lshift32(TrustedImm32(address
.scale
), scr
);
726 add32(address
.base
, scr
);
727 load8Signed(scr
, address
.offset
, dest
);
731 void load32(BaseIndex address
, RegisterID dest
)
733 RegisterID scr
= claimScratch();
734 move(address
.index
, scr
);
735 lshift32(TrustedImm32(address
.scale
), scr
);
736 add32(address
.base
, scr
);
737 load32(scr
, address
.offset
, dest
);
741 void load32(const void* address
, RegisterID dest
)
743 move(TrustedImmPtr(address
), dest
);
744 m_assembler
.movlMemReg(dest
, dest
);
747 void load32(RegisterID base
, int offset
, RegisterID dest
)
750 m_assembler
.movlMemReg(base
, dest
);
754 if ((offset
>= 0) && (offset
< 64)) {
755 m_assembler
.movlMemReg(offset
>> 2, base
, dest
);
759 RegisterID scr
= (dest
== base
) ? claimScratch() : dest
;
761 m_assembler
.loadConstant(offset
, scr
);
762 if (base
== SH4Registers::r0
)
763 m_assembler
.movlR0mr(scr
, dest
);
765 m_assembler
.addlRegReg(base
, scr
);
766 m_assembler
.movlMemReg(scr
, dest
);
773 void load8Signed(RegisterID base
, int offset
, RegisterID dest
)
776 m_assembler
.movbMemReg(base
, dest
);
780 if ((offset
> 0) && (offset
<= 15) && (dest
== SH4Registers::r0
)) {
781 m_assembler
.movbMemReg(offset
, base
, dest
);
785 RegisterID scr
= (dest
== base
) ? claimScratch() : dest
;
787 m_assembler
.loadConstant(offset
, scr
);
788 if (base
== SH4Registers::r0
)
789 m_assembler
.movbR0mr(scr
, dest
);
791 m_assembler
.addlRegReg(base
, scr
);
792 m_assembler
.movbMemReg(scr
, dest
);
799 void load8(RegisterID base
, int offset
, RegisterID dest
)
801 load8Signed(base
, offset
, dest
);
802 m_assembler
.extub(dest
, dest
);
805 void load32(RegisterID src
, RegisterID dst
)
807 m_assembler
.movlMemReg(src
, dst
);
810 void load16(ImplicitAddress address
, RegisterID dest
)
812 if (!address
.offset
) {
813 m_assembler
.movwMemReg(address
.base
, dest
);
814 m_assembler
.extuw(dest
, dest
);
818 if ((address
.offset
> 0) && (address
.offset
<= 30) && (dest
== SH4Registers::r0
)) {
819 m_assembler
.movwMemReg(address
.offset
>> 1, address
.base
, dest
);
820 m_assembler
.extuw(dest
, dest
);
824 RegisterID scr
= (dest
== address
.base
) ? claimScratch() : dest
;
826 m_assembler
.loadConstant(address
.offset
, scr
);
827 if (address
.base
== SH4Registers::r0
)
828 m_assembler
.movwR0mr(scr
, dest
);
830 m_assembler
.addlRegReg(address
.base
, scr
);
831 m_assembler
.movwMemReg(scr
, dest
);
833 m_assembler
.extuw(dest
, dest
);
835 if (dest
== address
.base
)
839 void load16Unaligned(BaseIndex address
, RegisterID dest
)
841 RegisterID scr
= claimScratch();
843 loadEffectiveAddress(address
, scr
);
845 RegisterID scr1
= claimScratch();
846 load8PostInc(scr
, scr1
);
848 m_assembler
.shllImm8r(8, dest
);
852 releaseScratch(scr1
);
855 void load16(RegisterID src
, RegisterID dest
)
857 m_assembler
.movwMemReg(src
, dest
);
858 m_assembler
.extuw(dest
, dest
);
861 void load16Signed(RegisterID src
, RegisterID dest
)
863 m_assembler
.movwMemReg(src
, dest
);
866 void load16(BaseIndex address
, RegisterID dest
)
868 load16Signed(address
, dest
);
869 m_assembler
.extuw(dest
, dest
);
872 void load16PostInc(RegisterID base
, RegisterID dest
)
874 m_assembler
.movwMemRegIn(base
, dest
);
875 m_assembler
.extuw(dest
, dest
);
878 void load16Signed(BaseIndex address
, RegisterID dest
)
880 RegisterID scr
= claimScratch();
882 move(address
.index
, scr
);
883 lshift32(TrustedImm32(address
.scale
), scr
);
884 add32(TrustedImm32(address
.offset
), scr
);
886 if (address
.base
== SH4Registers::r0
)
887 m_assembler
.movwR0mr(scr
, dest
);
889 add32(address
.base
, scr
);
890 load16Signed(scr
, dest
);
896 void store8(RegisterID src
, BaseIndex address
)
898 RegisterID scr
= claimScratch();
900 move(address
.index
, scr
);
901 lshift32(TrustedImm32(address
.scale
), scr
);
902 add32(TrustedImm32(address
.offset
), scr
);
904 if (address
.base
== SH4Registers::r0
)
905 m_assembler
.movbRegMemr0(src
, scr
);
907 add32(address
.base
, scr
);
908 m_assembler
.movbRegMem(src
, scr
);
914 void store8(RegisterID src
, void* address
)
916 RegisterID destptr
= claimScratch();
917 move(TrustedImmPtr(address
), destptr
);
918 m_assembler
.movbRegMem(src
, destptr
);
919 releaseScratch(destptr
);
922 void store8(TrustedImm32 imm
, void* address
)
924 ASSERT((imm
.m_value
>= -128) && (imm
.m_value
<= 127));
925 RegisterID dstptr
= claimScratch();
926 move(TrustedImmPtr(address
), dstptr
);
927 RegisterID srcval
= claimScratch();
929 m_assembler
.movbRegMem(srcval
, dstptr
);
930 releaseScratch(dstptr
);
931 releaseScratch(srcval
);
934 void store16(RegisterID src
, BaseIndex address
)
936 RegisterID scr
= claimScratch();
938 move(address
.index
, scr
);
939 lshift32(TrustedImm32(address
.scale
), scr
);
940 add32(TrustedImm32(address
.offset
), scr
);
942 if (address
.base
== SH4Registers::r0
)
943 m_assembler
.movwRegMemr0(src
, scr
);
945 add32(address
.base
, scr
);
946 m_assembler
.movwRegMem(src
, scr
);
952 void store32(RegisterID src
, ImplicitAddress address
)
954 if (!address
.offset
) {
955 m_assembler
.movlRegMem(src
, address
.base
);
959 if ((address
.offset
>= 0) && (address
.offset
< 64)) {
960 m_assembler
.movlRegMem(src
, address
.offset
>> 2, address
.base
);
964 RegisterID scr
= claimScratch();
965 m_assembler
.loadConstant(address
.offset
, scr
);
966 if (address
.base
== SH4Registers::r0
)
967 m_assembler
.movlRegMemr0(src
, scr
);
969 m_assembler
.addlRegReg(address
.base
, scr
);
970 m_assembler
.movlRegMem(src
, scr
);
975 void store32(RegisterID src
, RegisterID dst
)
977 m_assembler
.movlRegMem(src
, dst
);
980 void store32(TrustedImm32 imm
, ImplicitAddress address
)
982 RegisterID scr
= claimScratch();
983 m_assembler
.loadConstant(imm
.m_value
, scr
);
984 store32(scr
, address
);
988 void store32(RegisterID src
, BaseIndex address
)
990 RegisterID scr
= claimScratch();
992 move(address
.index
, scr
);
993 lshift32(TrustedImm32(address
.scale
), scr
);
994 add32(address
.base
, scr
);
995 store32(src
, Address(scr
, address
.offset
));
1000 void store32(TrustedImm32 imm
, void* address
)
1002 RegisterID scr
= claimScratch();
1003 RegisterID scr1
= claimScratch();
1004 m_assembler
.loadConstant(imm
.m_value
, scr
);
1005 move(TrustedImmPtr(address
), scr1
);
1006 m_assembler
.movlRegMem(scr
, scr1
);
1007 releaseScratch(scr
);
1008 releaseScratch(scr1
);
1011 void store32(RegisterID src
, void* address
)
1013 RegisterID scr
= claimScratch();
1014 move(TrustedImmPtr(address
), scr
);
1015 m_assembler
.movlRegMem(src
, scr
);
1016 releaseScratch(scr
);
1019 void store32(TrustedImm32 imm
, BaseIndex address
)
1021 RegisterID destptr
= claimScratch();
1023 loadEffectiveAddress(address
, destptr
);
1025 RegisterID srcval
= claimScratch();
1027 m_assembler
.movlRegMem(srcval
, destptr
);
1028 releaseScratch(srcval
);
1029 releaseScratch(destptr
);
1032 DataLabel32
load32WithAddressOffsetPatch(Address address
, RegisterID dest
)
1034 RegisterID scr
= claimScratch();
1035 DataLabel32
label(this);
1036 m_assembler
.loadConstantUnReusable(address
.offset
, scr
);
1037 m_assembler
.addlRegReg(address
.base
, scr
);
1038 m_assembler
.movlMemReg(scr
, dest
);
1039 releaseScratch(scr
);
1043 DataLabel32
store32WithAddressOffsetPatch(RegisterID src
, Address address
)
1045 RegisterID scr
= claimScratch();
1046 DataLabel32
label(this);
1047 m_assembler
.loadConstantUnReusable(address
.offset
, scr
);
1048 m_assembler
.addlRegReg(address
.base
, scr
);
1049 m_assembler
.movlRegMem(src
, scr
);
1050 releaseScratch(scr
);
1054 DataLabelCompact
load32WithCompactAddressOffsetPatch(Address address
, RegisterID dest
)
1056 DataLabelCompact
dataLabel(this);
1057 ASSERT(isCompactPtrAlignedAddressOffset(address
.offset
));
1058 m_assembler
.movlMemRegCompact(address
.offset
>> 2, address
.base
, dest
);
1062 ConvertibleLoadLabel
convertibleLoadPtr(Address address
, RegisterID dest
)
1064 ConvertibleLoadLabel
result(this);
1066 RegisterID scr
= claimScratch();
1067 m_assembler
.movImm8(address
.offset
, scr
);
1068 m_assembler
.addlRegReg(address
.base
, scr
);
1069 m_assembler
.movlMemReg(scr
, dest
);
1070 releaseScratch(scr
);
1075 // Floating-point operations
1077 static bool supportsFloatingPoint() { return true; }
1078 static bool supportsFloatingPointTruncate() { return true; }
1079 static bool supportsFloatingPointSqrt() { return true; }
1080 static bool supportsFloatingPointAbs() { return true; }
1082 void moveDoubleToInts(FPRegisterID src
, RegisterID dest1
, RegisterID dest2
)
1084 m_assembler
.fldsfpul((FPRegisterID
)(src
+ 1));
1085 m_assembler
.stsfpulReg(dest1
);
1086 m_assembler
.fldsfpul(src
);
1087 m_assembler
.stsfpulReg(dest2
);
1090 void moveIntsToDouble(RegisterID src1
, RegisterID src2
, FPRegisterID dest
, FPRegisterID
)
1092 m_assembler
.ldsrmfpul(src1
);
1093 m_assembler
.fstsfpul((FPRegisterID
)(dest
+ 1));
1094 m_assembler
.ldsrmfpul(src2
);
1095 m_assembler
.fstsfpul(dest
);
1098 void moveDouble(FPRegisterID src
, FPRegisterID dest
)
1101 m_assembler
.fmovsRegReg((FPRegisterID
)(src
+ 1), (FPRegisterID
)(dest
+ 1));
1102 m_assembler
.fmovsRegReg(src
, dest
);
1106 void swapDouble(FPRegisterID fr1
, FPRegisterID fr2
)
1109 m_assembler
.fldsfpul((FPRegisterID
)(fr1
+ 1));
1110 m_assembler
.fmovsRegReg((FPRegisterID
)(fr2
+ 1), (FPRegisterID
)(fr1
+ 1));
1111 m_assembler
.fstsfpul((FPRegisterID
)(fr2
+ 1));
1112 m_assembler
.fldsfpul(fr1
);
1113 m_assembler
.fmovsRegReg(fr2
, fr1
);
1114 m_assembler
.fstsfpul(fr2
);
1118 void loadFloat(BaseIndex address
, FPRegisterID dest
)
1120 RegisterID scr
= claimScratch();
1122 loadEffectiveAddress(address
, scr
);
1124 m_assembler
.fmovsReadrm(scr
, dest
);
1125 releaseScratch(scr
);
1128 void loadDouble(BaseIndex address
, FPRegisterID dest
)
1130 RegisterID scr
= claimScratch();
1132 loadEffectiveAddress(address
, scr
);
1134 m_assembler
.fmovsReadrminc(scr
, (FPRegisterID
)(dest
+ 1));
1135 m_assembler
.fmovsReadrm(scr
, dest
);
1136 releaseScratch(scr
);
1139 void loadDouble(ImplicitAddress address
, FPRegisterID dest
)
1141 RegisterID scr
= claimScratch();
1143 m_assembler
.loadConstant(address
.offset
, scr
);
1144 if (address
.base
== SH4Registers::r0
) {
1145 m_assembler
.fmovsReadr0r(scr
, (FPRegisterID
)(dest
+ 1));
1146 m_assembler
.addlImm8r(4, scr
);
1147 m_assembler
.fmovsReadr0r(scr
, dest
);
1148 releaseScratch(scr
);
1152 m_assembler
.addlRegReg(address
.base
, scr
);
1153 m_assembler
.fmovsReadrminc(scr
, (FPRegisterID
)(dest
+ 1));
1154 m_assembler
.fmovsReadrm(scr
, dest
);
1155 releaseScratch(scr
);
1158 void loadDouble(TrustedImmPtr address
, FPRegisterID dest
)
1160 RegisterID scr
= claimScratch();
1162 m_assembler
.fmovsReadrminc(scr
, (FPRegisterID
)(dest
+ 1));
1163 m_assembler
.fmovsReadrm(scr
, dest
);
1164 releaseScratch(scr
);
1167 void storeFloat(FPRegisterID src
, BaseIndex address
)
1169 RegisterID scr
= claimScratch();
1170 loadEffectiveAddress(address
, scr
);
1171 m_assembler
.fmovsWriterm(src
, scr
);
1172 releaseScratch(scr
);
1175 void storeDouble(FPRegisterID src
, ImplicitAddress address
)
1177 RegisterID scr
= claimScratch();
1178 m_assembler
.loadConstant(address
.offset
+ 8, scr
);
1179 m_assembler
.addlRegReg(address
.base
, scr
);
1180 m_assembler
.fmovsWriterndec(src
, scr
);
1181 m_assembler
.fmovsWriterndec((FPRegisterID
)(src
+ 1), scr
);
1182 releaseScratch(scr
);
1185 void storeDouble(FPRegisterID src
, BaseIndex address
)
1187 RegisterID scr
= claimScratch();
1189 loadEffectiveAddress(address
, scr
, 8);
1191 m_assembler
.fmovsWriterndec(src
, scr
);
1192 m_assembler
.fmovsWriterndec((FPRegisterID
)(src
+ 1), scr
);
1194 releaseScratch(scr
);
1197 void addDouble(FPRegisterID op1
, FPRegisterID op2
, FPRegisterID dest
)
1200 addDouble(op2
, dest
);
1202 moveDouble(op2
, dest
);
1203 addDouble(op1
, dest
);
1207 void storeDouble(FPRegisterID src
, TrustedImmPtr address
)
1209 RegisterID scr
= claimScratch();
1210 m_assembler
.loadConstant(reinterpret_cast<uint32_t>(const_cast<void*>(address
.m_value
)) + 8, scr
);
1211 m_assembler
.fmovsWriterndec(src
, scr
);
1212 m_assembler
.fmovsWriterndec((FPRegisterID
)(src
+ 1), scr
);
1213 releaseScratch(scr
);
1216 void addDouble(FPRegisterID src
, FPRegisterID dest
)
1218 m_assembler
.daddRegReg(src
, dest
);
1221 void addDouble(AbsoluteAddress address
, FPRegisterID dest
)
1223 loadDouble(TrustedImmPtr(address
.m_ptr
), fscratch
);
1224 addDouble(fscratch
, dest
);
1227 void addDouble(Address address
, FPRegisterID dest
)
1229 loadDouble(address
, fscratch
);
1230 addDouble(fscratch
, dest
);
1233 void subDouble(FPRegisterID src
, FPRegisterID dest
)
1235 m_assembler
.dsubRegReg(src
, dest
);
1238 void subDouble(FPRegisterID op1
, FPRegisterID op2
, FPRegisterID dest
)
1241 moveDouble(op1
, fscratch
);
1242 subDouble(op2
, fscratch
);
1243 moveDouble(fscratch
, dest
);
1245 moveDouble(op1
, dest
);
1246 subDouble(op2
, dest
);
1250 void subDouble(Address address
, FPRegisterID dest
)
1252 loadDouble(address
, fscratch
);
1253 subDouble(fscratch
, dest
);
1256 void mulDouble(FPRegisterID src
, FPRegisterID dest
)
1258 m_assembler
.dmulRegReg(src
, dest
);
1261 void mulDouble(FPRegisterID op1
, FPRegisterID op2
, FPRegisterID dest
)
1264 mulDouble(op2
, dest
);
1266 moveDouble(op2
, dest
);
1267 mulDouble(op1
, dest
);
1271 void mulDouble(Address address
, FPRegisterID dest
)
1273 loadDouble(address
, fscratch
);
1274 mulDouble(fscratch
, dest
);
1277 void divDouble(FPRegisterID src
, FPRegisterID dest
)
1279 m_assembler
.ddivRegReg(src
, dest
);
1282 void divDouble(FPRegisterID op1
, FPRegisterID op2
, FPRegisterID dest
)
1285 moveDouble(op1
, fscratch
);
1286 divDouble(op2
, fscratch
);
1287 moveDouble(fscratch
, dest
);
1289 moveDouble(op1
, dest
);
1290 divDouble(op2
, dest
);
1294 void negateDouble(FPRegisterID src
, FPRegisterID dest
)
1296 moveDouble(src
, dest
);
1297 m_assembler
.dneg(dest
);
1300 void convertFloatToDouble(FPRegisterID src
, FPRegisterID dst
)
1302 m_assembler
.fldsfpul(src
);
1303 m_assembler
.dcnvsd(dst
);
1306 void convertDoubleToFloat(FPRegisterID src
, FPRegisterID dst
)
1308 m_assembler
.dcnvds(src
);
1309 m_assembler
.fstsfpul(dst
);
1312 void convertInt32ToDouble(RegisterID src
, FPRegisterID dest
)
1314 m_assembler
.ldsrmfpul(src
);
1315 m_assembler
.floatfpulDreg(dest
);
1318 void convertInt32ToDouble(AbsoluteAddress src
, FPRegisterID dest
)
1320 RegisterID scr
= claimScratch();
1321 load32(src
.m_ptr
, scr
);
1322 convertInt32ToDouble(scr
, dest
);
1323 releaseScratch(scr
);
1326 void convertInt32ToDouble(Address src
, FPRegisterID dest
)
1328 RegisterID scr
= claimScratch();
1330 convertInt32ToDouble(scr
, dest
);
1331 releaseScratch(scr
);
1334 void load32WithUnalignedHalfWords(BaseIndex address
, RegisterID dest
)
1336 RegisterID scr
= claimScratch();
1340 loadEffectiveAddress(address
, scr
);
1342 RegisterID scr1
= claimScratch();
1343 if (dest
!= SH4Registers::r0
)
1344 move(SH4Registers::r0
, scr1
);
1346 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 58, sizeof(uint32_t));
1347 move(scr
, SH4Registers::r0
);
1348 m_assembler
.testlImm8r(0x3, SH4Registers::r0
);
1349 m_jump
= Jump(m_assembler
.jne(), SH4Assembler::JumpNear
);
1351 if (dest
!= SH4Registers::r0
)
1352 move(scr1
, SH4Registers::r0
);
1355 end
.append(Jump(m_assembler
.bra(), SH4Assembler::JumpNear
));
1358 m_assembler
.testlImm8r(0x1, SH4Registers::r0
);
1360 if (dest
!= SH4Registers::r0
)
1361 move(scr1
, SH4Registers::r0
);
1363 m_jump
= Jump(m_assembler
.jne(), SH4Assembler::JumpNear
);
1364 load16PostInc(scr
, scr1
);
1366 m_assembler
.shllImm8r(16, dest
);
1368 end
.append(Jump(m_assembler
.bra(), SH4Assembler::JumpNear
));
1371 load8PostInc(scr
, scr1
);
1372 load16PostInc(scr
, dest
);
1373 m_assembler
.shllImm8r(8, dest
);
1376 m_assembler
.shllImm8r(8, dest
);
1377 m_assembler
.shllImm8r(16, dest
);
1381 releaseScratch(scr
);
1382 releaseScratch(scr1
);
1385 Jump
branch32WithUnalignedHalfWords(RelationalCondition cond
, BaseIndex left
, TrustedImm32 right
)
1387 RegisterID scr
= scratchReg3
;
1388 load32WithUnalignedHalfWords(left
, scr
);
1389 if (((cond
== Equal
) || (cond
== NotEqual
)) && !right
.m_value
)
1390 m_assembler
.testlRegReg(scr
, scr
);
1392 compare32(right
.m_value
, scr
, cond
);
1394 if (cond
== NotEqual
)
1395 return branchFalse();
1396 return branchTrue();
1399 Jump
branchDoubleNonZero(FPRegisterID reg
, FPRegisterID scratch
)
1401 m_assembler
.movImm8(0, scratchReg3
);
1402 convertInt32ToDouble(scratchReg3
, scratch
);
1403 return branchDouble(DoubleNotEqual
, reg
, scratch
);
1406 Jump
branchDoubleZeroOrNaN(FPRegisterID reg
, FPRegisterID scratch
)
1408 m_assembler
.movImm8(0, scratchReg3
);
1409 convertInt32ToDouble(scratchReg3
, scratch
);
1410 return branchDouble(DoubleEqualOrUnordered
, reg
, scratch
);
1413 Jump
branchDouble(DoubleCondition cond
, FPRegisterID left
, FPRegisterID right
)
1415 if (cond
== DoubleEqual
) {
1416 m_assembler
.dcmppeq(right
, left
);
1417 return branchTrue();
1420 if (cond
== DoubleNotEqual
) {
1422 m_assembler
.dcmppeq(left
, left
);
1423 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 22, sizeof(uint32_t));
1424 end
.append(Jump(m_assembler
.jne(), SH4Assembler::JumpNear
));
1425 m_assembler
.dcmppeq(right
, right
);
1426 end
.append(Jump(m_assembler
.jne(), SH4Assembler::JumpNear
));
1427 m_assembler
.dcmppeq(right
, left
);
1428 Jump m_jump
= branchFalse();
1433 if (cond
== DoubleGreaterThan
) {
1434 m_assembler
.dcmppgt(right
, left
);
1435 return branchTrue();
1438 if (cond
== DoubleGreaterThanOrEqual
) {
1440 m_assembler
.dcmppeq(left
, left
);
1441 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 22, sizeof(uint32_t));
1442 end
.append(Jump(m_assembler
.jne(), SH4Assembler::JumpNear
));
1443 m_assembler
.dcmppeq(right
, right
);
1444 end
.append(Jump(m_assembler
.jne(), SH4Assembler::JumpNear
));
1445 m_assembler
.dcmppgt(left
, right
);
1446 Jump m_jump
= branchFalse();
1451 if (cond
== DoubleLessThan
) {
1452 m_assembler
.dcmppgt(left
, right
);
1453 return branchTrue();
1456 if (cond
== DoubleLessThanOrEqual
) {
1458 m_assembler
.dcmppeq(left
, left
);
1459 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 22, sizeof(uint32_t));
1460 end
.append(Jump(m_assembler
.jne(), SH4Assembler::JumpNear
));
1461 m_assembler
.dcmppeq(right
, right
);
1462 end
.append(Jump(m_assembler
.jne(), SH4Assembler::JumpNear
));
1463 m_assembler
.dcmppgt(right
, left
);
1464 Jump m_jump
= branchFalse();
1469 if (cond
== DoubleEqualOrUnordered
) {
1470 JumpList takeBranch
;
1471 m_assembler
.dcmppeq(left
, left
);
1472 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 22, sizeof(uint32_t));
1473 takeBranch
.append(Jump(m_assembler
.jne(), SH4Assembler::JumpNear
));
1474 m_assembler
.dcmppeq(right
, right
);
1475 takeBranch
.append(Jump(m_assembler
.jne(), SH4Assembler::JumpNear
));
1476 m_assembler
.dcmppeq(left
, right
);
1477 m_assembler
.branch(BF_OPCODE
, 2);
1478 takeBranch
.link(this);
1479 return Jump(m_assembler
.extraInstrForBranch(scratchReg3
));
1482 if (cond
== DoubleGreaterThanOrUnordered
) {
1483 JumpList takeBranch
;
1484 m_assembler
.dcmppeq(left
, left
);
1485 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 22, sizeof(uint32_t));
1486 takeBranch
.append(Jump(m_assembler
.jne(), SH4Assembler::JumpNear
));
1487 m_assembler
.dcmppeq(right
, right
);
1488 takeBranch
.append(Jump(m_assembler
.jne(), SH4Assembler::JumpNear
));
1489 m_assembler
.dcmppgt(right
, left
);
1490 m_assembler
.branch(BF_OPCODE
, 2);
1491 takeBranch
.link(this);
1492 return Jump(m_assembler
.extraInstrForBranch(scratchReg3
));
1495 if (cond
== DoubleGreaterThanOrEqualOrUnordered
) {
1496 m_assembler
.dcmppgt(left
, right
);
1497 return branchFalse();
1500 if (cond
== DoubleLessThanOrUnordered
) {
1501 JumpList takeBranch
;
1502 m_assembler
.dcmppeq(left
, left
);
1503 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 22, sizeof(uint32_t));
1504 takeBranch
.append(Jump(m_assembler
.jne(), SH4Assembler::JumpNear
));
1505 m_assembler
.dcmppeq(right
, right
);
1506 takeBranch
.append(Jump(m_assembler
.jne(), SH4Assembler::JumpNear
));
1507 m_assembler
.dcmppgt(left
, right
);
1508 m_assembler
.branch(BF_OPCODE
, 2);
1509 takeBranch
.link(this);
1510 return Jump(m_assembler
.extraInstrForBranch(scratchReg3
));
1513 if (cond
== DoubleLessThanOrEqualOrUnordered
) {
1514 m_assembler
.dcmppgt(right
, left
);
1515 return branchFalse();
1518 ASSERT(cond
== DoubleNotEqualOrUnordered
);
1519 m_assembler
.dcmppeq(right
, left
);
1520 return branchFalse();
1525 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 6, sizeof(uint32_t));
1526 m_assembler
.branch(BF_OPCODE
, 2);
1527 return Jump(m_assembler
.extraInstrForBranch(scratchReg3
));
1532 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 6, sizeof(uint32_t));
1533 m_assembler
.branch(BT_OPCODE
, 2);
1534 return Jump(m_assembler
.extraInstrForBranch(scratchReg3
));
1537 Jump
branch32(RelationalCondition cond
, BaseIndex left
, TrustedImm32 right
)
1539 RegisterID scr
= claimScratch();
1540 move(left
.index
, scr
);
1541 lshift32(TrustedImm32(left
.scale
), scr
);
1542 add32(left
.base
, scr
);
1543 load32(scr
, left
.offset
, scr
);
1544 compare32(right
.m_value
, scr
, cond
);
1545 releaseScratch(scr
);
1547 if (cond
== NotEqual
)
1548 return branchFalse();
1549 return branchTrue();
1552 void sqrtDouble(FPRegisterID src
, FPRegisterID dest
)
1554 moveDouble(src
, dest
);
1555 m_assembler
.dsqrt(dest
);
1558 void absDouble(FPRegisterID src
, FPRegisterID dest
)
1560 moveDouble(src
, dest
);
1561 m_assembler
.dabs(dest
);
1564 Jump
branchTest8(ResultCondition cond
, Address address
, TrustedImm32 mask
= TrustedImm32(-1))
1566 RegisterID addressTempRegister
= claimScratch();
1567 load8(address
, addressTempRegister
);
1568 Jump jmp
= branchTest32(cond
, addressTempRegister
, mask
);
1569 releaseScratch(addressTempRegister
);
1573 Jump
branchTest8(ResultCondition cond
, BaseIndex address
, TrustedImm32 mask
= TrustedImm32(-1))
1575 RegisterID addressTempRegister
= claimScratch();
1576 load8(address
, addressTempRegister
);
1577 Jump jmp
= branchTest32(cond
, addressTempRegister
, mask
);
1578 releaseScratch(addressTempRegister
);
1582 Jump
branchTest8(ResultCondition cond
, AbsoluteAddress address
, TrustedImm32 mask
= TrustedImm32(-1))
1584 RegisterID addressTempRegister
= claimScratch();
1585 move(TrustedImmPtr(address
.m_ptr
), addressTempRegister
);
1586 load8(Address(addressTempRegister
), addressTempRegister
);
1587 Jump jmp
= branchTest32(cond
, addressTempRegister
, mask
);
1588 releaseScratch(addressTempRegister
);
1592 void signExtend32ToPtr(RegisterID src
, RegisterID dest
)
1597 void zeroExtend32ToPtr(RegisterID src
, RegisterID dest
)
1602 Jump
branch8(RelationalCondition cond
, Address left
, TrustedImm32 right
)
1604 RegisterID addressTempRegister
= claimScratch();
1605 load8(left
, addressTempRegister
);
1606 Jump jmp
= branch32(cond
, addressTempRegister
, right
);
1607 releaseScratch(addressTempRegister
);
1611 Jump
branch8(RelationalCondition cond
, AbsoluteAddress left
, TrustedImm32 right
)
1613 RegisterID addressTempRegister
= claimScratch();
1614 load8(left
, addressTempRegister
);
1615 Jump jmp
= branch32(cond
, addressTempRegister
, right
);
1616 releaseScratch(addressTempRegister
);
1620 void compare8(RelationalCondition cond
, Address left
, TrustedImm32 right
, RegisterID dest
)
1622 RegisterID addressTempRegister
= claimScratch();
1623 load8(left
, addressTempRegister
);
1624 compare32(cond
, addressTempRegister
, right
, dest
);
1625 releaseScratch(addressTempRegister
);
// Selects which outcome of a double->int truncation the branch is taken on.
enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
1629 Jump
branchTruncateDoubleToInt32(FPRegisterID src
, RegisterID dest
, BranchTruncateType branchType
= BranchIfTruncateFailed
)
1632 truncateDoubleToInt32(src
, dest
);
1633 RegisterID intscr
= claimScratch();
1634 m_assembler
.loadConstant(0x7fffffff, intscr
);
1635 m_assembler
.cmplRegReg(dest
, intscr
, SH4Condition(Equal
));
1636 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 12, sizeof(uint32_t));
1637 if (branchType
== BranchIfTruncateFailed
) {
1638 m_assembler
.branch(BT_OPCODE
, 2);
1639 m_assembler
.addlImm8r(1, intscr
);
1640 m_assembler
.cmplRegReg(dest
, intscr
, SH4Condition(Equal
));
1641 result
= branchTrue();
1643 Jump out
= Jump(m_assembler
.je(), SH4Assembler::JumpNear
);
1644 m_assembler
.addlImm8r(1, intscr
);
1645 m_assembler
.cmplRegReg(dest
, intscr
, SH4Condition(Equal
));
1646 result
= branchFalse();
1649 releaseScratch(intscr
);
1653 Jump
branchTruncateDoubleToUint32(FPRegisterID src
, RegisterID dest
, BranchTruncateType branchType
= BranchIfTruncateFailed
)
1656 RegisterID intscr
= claimScratch();
1657 m_assembler
.loadConstant(0x80000000, intscr
);
1658 convertInt32ToDouble(intscr
, fscratch
);
1659 addDouble(src
, fscratch
);
1660 truncateDoubleToInt32(fscratch
, dest
);
1661 m_assembler
.cmplRegReg(dest
, intscr
, SH4Condition(Equal
));
1662 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 16, sizeof(uint32_t));
1663 if (branchType
== BranchIfTruncateFailed
) {
1664 m_assembler
.branch(BT_OPCODE
, 4);
1665 m_assembler
.addlImm8r(-1, intscr
);
1666 m_assembler
.cmplRegReg(dest
, intscr
, SH4Condition(Equal
));
1667 m_assembler
.addlImm8r(1, intscr
);
1668 m_assembler
.sublRegReg(intscr
, dest
);
1669 result
= branchTrue();
1671 Jump out
= Jump(m_assembler
.je(), SH4Assembler::JumpNear
);
1672 m_assembler
.addlImm8r(-1, intscr
);
1673 m_assembler
.cmplRegReg(dest
, intscr
, SH4Condition(Equal
));
1674 m_assembler
.addlImm8r(1, intscr
);
1675 m_assembler
.sublRegReg(intscr
, dest
);
1676 result
= branchFalse();
1679 releaseScratch(intscr
);
1683 void truncateDoubleToInt32(FPRegisterID src
, RegisterID dest
)
1685 m_assembler
.ftrcdrmfpul(src
);
1686 m_assembler
.stsfpulReg(dest
);
1689 void truncateDoubleToUint32(FPRegisterID src
, RegisterID dest
)
1691 RegisterID intscr
= claimScratch();
1692 m_assembler
.loadConstant(0x80000000, intscr
);
1693 convertInt32ToDouble(intscr
, fscratch
);
1694 addDouble(src
, fscratch
);
1695 m_assembler
.ftrcdrmfpul(fscratch
);
1696 m_assembler
.stsfpulReg(dest
);
1697 m_assembler
.sublRegReg(intscr
, dest
);
1698 releaseScratch(intscr
);
1701 // Stack manipulation operations
1703 void pop(RegisterID dest
)
1705 m_assembler
.popReg(dest
);
1708 void push(RegisterID src
)
1710 m_assembler
.pushReg(src
);
1713 void push(TrustedImm32 imm
)
1715 RegisterID scr
= claimScratch();
1716 m_assembler
.loadConstant(imm
.m_value
, scr
);
1718 releaseScratch(scr
);
1721 // Register move operations
1723 void move(TrustedImm32 imm
, RegisterID dest
)
1725 m_assembler
.loadConstant(imm
.m_value
, dest
);
1728 DataLabelPtr
moveWithPatch(TrustedImmPtr initialValue
, RegisterID dest
)
1730 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
, sizeof(uint32_t));
1731 DataLabelPtr
dataLabel(this);
1732 m_assembler
.loadConstantUnReusable(reinterpret_cast<uint32_t>(initialValue
.m_value
), dest
);
1736 void move(RegisterID src
, RegisterID dest
)
1739 m_assembler
.movlRegReg(src
, dest
);
1742 void move(TrustedImmPtr imm
, RegisterID dest
)
1744 m_assembler
.loadConstant(imm
.asIntptr(), dest
);
1747 void swap(RegisterID reg1
, RegisterID reg2
)
1756 void compare32(RelationalCondition cond
, RegisterID left
, RegisterID right
, RegisterID dest
)
1758 m_assembler
.cmplRegReg(right
, left
, SH4Condition(cond
));
1759 if (cond
!= NotEqual
) {
1760 m_assembler
.movt(dest
);
1764 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 4);
1765 m_assembler
.movImm8(0, dest
);
1766 m_assembler
.branch(BT_OPCODE
, 0);
1767 m_assembler
.movImm8(1, dest
);
1770 void compare32(RelationalCondition cond
, RegisterID left
, TrustedImm32 right
, RegisterID dest
)
1774 compare32(cond
, left
, dest
, dest
);
1778 RegisterID scr
= claimScratch();
1780 compare32(cond
, left
, scr
, dest
);
1781 releaseScratch(scr
);
1784 void test8(ResultCondition cond
, Address address
, TrustedImm32 mask
, RegisterID dest
)
1786 ASSERT((cond
== Zero
) || (cond
== NonZero
));
1788 load8(address
, dest
);
1789 if (mask
.m_value
== -1)
1790 compare32(0, dest
, static_cast<RelationalCondition
>(cond
));
1792 testlImm(mask
.m_value
, dest
);
1793 if (cond
!= NonZero
) {
1794 m_assembler
.movt(dest
);
1798 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 4);
1799 m_assembler
.movImm8(0, dest
);
1800 m_assembler
.branch(BT_OPCODE
, 0);
1801 m_assembler
.movImm8(1, dest
);
1804 void test32(ResultCondition cond
, Address address
, TrustedImm32 mask
, RegisterID dest
)
1806 ASSERT((cond
== Zero
) || (cond
== NonZero
));
1808 load32(address
, dest
);
1809 if (mask
.m_value
== -1)
1810 compare32(0, dest
, static_cast<RelationalCondition
>(cond
));
1812 testlImm(mask
.m_value
, dest
);
1813 if (cond
!= NonZero
) {
1814 m_assembler
.movt(dest
);
1818 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 4);
1819 m_assembler
.movImm8(0, dest
);
1820 m_assembler
.branch(BT_OPCODE
, 0);
1821 m_assembler
.movImm8(1, dest
);
1824 void loadPtrLinkReg(ImplicitAddress address
)
1826 RegisterID scr
= claimScratch();
1827 load32(address
, scr
);
1828 m_assembler
.ldspr(scr
);
1829 releaseScratch(scr
);
1832 Jump
branch32(RelationalCondition cond
, RegisterID left
, RegisterID right
)
1834 m_assembler
.cmplRegReg(right
, left
, SH4Condition(cond
));
1835 /* BT label => BF off
1840 if (cond
== NotEqual
)
1841 return branchFalse();
1842 return branchTrue();
1845 Jump
branch32(RelationalCondition cond
, RegisterID left
, TrustedImm32 right
)
1847 if (((cond
== Equal
) || (cond
== NotEqual
)) && !right
.m_value
)
1848 m_assembler
.testlRegReg(left
, left
);
1850 compare32(right
.m_value
, left
, cond
);
1852 if (cond
== NotEqual
)
1853 return branchFalse();
1854 return branchTrue();
1857 Jump
branch32(RelationalCondition cond
, RegisterID left
, Address right
)
1859 compare32(right
.offset
, right
.base
, left
, cond
);
1860 if (cond
== NotEqual
)
1861 return branchFalse();
1862 return branchTrue();
1865 Jump
branch32(RelationalCondition cond
, Address left
, RegisterID right
)
1867 compare32(right
, left
.offset
, left
.base
, cond
);
1868 if (cond
== NotEqual
)
1869 return branchFalse();
1870 return branchTrue();
1873 Jump
branch32(RelationalCondition cond
, Address left
, TrustedImm32 right
)
1875 compare32(right
.m_value
, left
.offset
, left
.base
, cond
);
1876 if (cond
== NotEqual
)
1877 return branchFalse();
1878 return branchTrue();
1881 Jump
branch32(RelationalCondition cond
, AbsoluteAddress left
, RegisterID right
)
1883 RegisterID scr
= claimScratch();
1885 load32(left
.m_ptr
, scr
);
1886 m_assembler
.cmplRegReg(right
, scr
, SH4Condition(cond
));
1887 releaseScratch(scr
);
1889 if (cond
== NotEqual
)
1890 return branchFalse();
1891 return branchTrue();
1894 Jump
branch32(RelationalCondition cond
, AbsoluteAddress left
, TrustedImm32 right
)
1896 RegisterID addressTempRegister
= claimScratch();
1898 move(TrustedImmPtr(left
.m_ptr
), addressTempRegister
);
1899 m_assembler
.movlMemReg(addressTempRegister
, addressTempRegister
);
1900 compare32(right
.m_value
, addressTempRegister
, cond
);
1901 releaseScratch(addressTempRegister
);
1903 if (cond
== NotEqual
)
1904 return branchFalse();
1905 return branchTrue();
1908 Jump
branch8(RelationalCondition cond
, BaseIndex left
, TrustedImm32 right
)
1910 ASSERT(!(right
.m_value
& 0xFFFFFF00));
1911 RegisterID lefttmp
= claimScratch();
1913 loadEffectiveAddress(left
, lefttmp
);
1915 load8(lefttmp
, lefttmp
);
1916 RegisterID righttmp
= claimScratch();
1917 m_assembler
.loadConstant(right
.m_value
, righttmp
);
1919 Jump result
= branch32(cond
, lefttmp
, righttmp
);
1920 releaseScratch(lefttmp
);
1921 releaseScratch(righttmp
);
1925 Jump
branchTest32(ResultCondition cond
, RegisterID reg
, RegisterID mask
)
1927 ASSERT((cond
== Zero
) || (cond
== NonZero
));
1929 m_assembler
.testlRegReg(reg
, mask
);
1931 if (cond
== NonZero
) // NotEqual
1932 return branchFalse();
1933 return branchTrue();
1936 Jump
branchTest32(ResultCondition cond
, RegisterID reg
, TrustedImm32 mask
= TrustedImm32(-1))
1938 ASSERT((cond
== Zero
) || (cond
== NonZero
));
1940 if (mask
.m_value
== -1)
1941 m_assembler
.testlRegReg(reg
, reg
);
1943 testlImm(mask
.m_value
, reg
);
1945 if (cond
== NonZero
) // NotEqual
1946 return branchFalse();
1947 return branchTrue();
1950 Jump
branchTest32(ResultCondition cond
, Address address
, TrustedImm32 mask
= TrustedImm32(-1))
1952 ASSERT((cond
== Zero
) || (cond
== NonZero
));
1954 if (mask
.m_value
== -1)
1955 compare32(0, address
.offset
, address
.base
, static_cast<RelationalCondition
>(cond
));
1957 testImm(mask
.m_value
, address
.offset
, address
.base
);
1959 if (cond
== NonZero
) // NotEqual
1960 return branchFalse();
1961 return branchTrue();
1964 Jump
branchTest32(ResultCondition cond
, BaseIndex address
, TrustedImm32 mask
= TrustedImm32(-1))
1966 ASSERT((cond
== Zero
) || (cond
== NonZero
));
1968 RegisterID scr
= claimScratch();
1970 move(address
.index
, scr
);
1971 lshift32(TrustedImm32(address
.scale
), scr
);
1972 add32(address
.base
, scr
);
1973 load32(scr
, address
.offset
, scr
);
1975 if (mask
.m_value
== -1)
1976 m_assembler
.testlRegReg(scr
, scr
);
1978 testlImm(mask
.m_value
, scr
);
1980 releaseScratch(scr
);
1982 if (cond
== NonZero
) // NotEqual
1983 return branchFalse();
1984 return branchTrue();
1989 return Jump(m_assembler
.jmp());
1992 void jump(RegisterID target
)
1994 m_assembler
.jmpReg(target
);
1997 void jump(Address address
)
1999 RegisterID scr
= claimScratch();
2000 load32(address
, scr
);
2001 m_assembler
.jmpReg(scr
);
2002 releaseScratch(scr
);
2005 void jump(AbsoluteAddress address
)
2007 RegisterID scr
= claimScratch();
2009 move(TrustedImmPtr(address
.m_ptr
), scr
);
2010 m_assembler
.movlMemReg(scr
, scr
);
2011 m_assembler
.jmpReg(scr
);
2012 releaseScratch(scr
);
2015 // Arithmetic control flow operations
2017 Jump
branchNeg32(ResultCondition cond
, RegisterID srcDest
)
2019 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
2021 if (cond
== Overflow
)
2022 return branchMul32(cond
, TrustedImm32(-1), srcDest
, srcDest
);
2026 if (cond
== Signed
) {
2027 m_assembler
.cmppz(srcDest
);
2028 return branchFalse();
2031 compare32(0, srcDest
, Equal
);
2032 return (cond
== NonZero
) ? branchFalse() : branchTrue();
2035 Jump
branchAdd32(ResultCondition cond
, RegisterID src
, RegisterID dest
)
2037 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== PositiveOrZero
) || (cond
== Zero
) || (cond
== NonZero
));
2039 if (cond
== Overflow
) {
2040 m_assembler
.addvlRegReg(src
, dest
);
2041 return branchTrue();
2044 m_assembler
.addlRegReg(src
, dest
);
2046 if ((cond
== Signed
) || (cond
== PositiveOrZero
)) {
2047 m_assembler
.cmppz(dest
);
2048 return (cond
== Signed
) ? branchFalse() : branchTrue();
2051 compare32(0, dest
, Equal
);
2052 return (cond
== NonZero
) ? branchFalse() : branchTrue();
2055 Jump
branchAdd32(ResultCondition cond
, RegisterID src1
, RegisterID src2
, RegisterID dest
)
2057 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== PositiveOrZero
) || (cond
== Zero
) || (cond
== NonZero
));
2059 if (cond
== Overflow
) {
2061 m_assembler
.addvlRegReg(src2
, dest
);
2064 m_assembler
.addvlRegReg(src1
, dest
);
2066 return branchTrue();
2069 add32(src1
, src2
, dest
);
2071 if ((cond
== Signed
) || (cond
== PositiveOrZero
)) {
2072 m_assembler
.cmppz(dest
);
2073 return (cond
== Signed
) ? branchFalse() : branchTrue();
2076 compare32(0, dest
, Equal
);
2077 return (cond
== NonZero
) ? branchFalse() : branchTrue();
2080 Jump
branchAdd32(ResultCondition cond
, TrustedImm32 imm
, RegisterID dest
)
2082 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== PositiveOrZero
) || (cond
== Zero
) || (cond
== NonZero
));
2084 RegisterID immval
= claimScratch();
2086 Jump result
= branchAdd32(cond
, immval
, dest
);
2087 releaseScratch(immval
);
2091 Jump
branchAdd32(ResultCondition cond
, RegisterID src
, TrustedImm32 imm
, RegisterID dest
)
2093 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== PositiveOrZero
) || (cond
== Zero
) || (cond
== NonZero
));
2097 if (cond
== Overflow
) {
2098 move(imm
, scratchReg3
);
2099 m_assembler
.addvlRegReg(scratchReg3
, dest
);
2100 return branchTrue();
2105 if ((cond
== Signed
) || (cond
== PositiveOrZero
)) {
2106 m_assembler
.cmppz(dest
);
2107 return (cond
== Signed
) ? branchFalse() : branchTrue();
2110 compare32(0, dest
, Equal
);
2111 return (cond
== NonZero
) ? branchFalse() : branchTrue();
2114 Jump
branchAdd32(ResultCondition cond
, TrustedImm32 imm
, AbsoluteAddress dest
)
2116 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== PositiveOrZero
) || (cond
== Zero
) || (cond
== NonZero
));
2119 move(imm
, scratchReg3
);
2120 RegisterID destptr
= claimScratch();
2121 RegisterID destval
= claimScratch();
2122 move(TrustedImmPtr(dest
.m_ptr
), destptr
);
2123 m_assembler
.movlMemReg(destptr
, destval
);
2124 if (cond
== Overflow
) {
2125 m_assembler
.addvlRegReg(scratchReg3
, destval
);
2128 m_assembler
.addlRegReg(scratchReg3
, destval
);
2129 if ((cond
== Signed
) || (cond
== PositiveOrZero
)) {
2130 m_assembler
.cmppz(destval
);
2131 result
= (cond
== PositiveOrZero
);
2133 m_assembler
.testlRegReg(destval
, destval
);
2134 result
= (cond
!= NonZero
);
2137 m_assembler
.movlRegMem(destval
, destptr
);
2138 releaseScratch(destval
);
2139 releaseScratch(destptr
);
2140 return result
? branchTrue() : branchFalse();
2143 Jump
branchMul32(ResultCondition cond
, RegisterID src
, RegisterID dest
)
2145 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
2147 if (cond
== Overflow
) {
2148 RegisterID scrsign
= claimScratch();
2149 RegisterID msbres
= claimScratch();
2150 m_assembler
.dmulslRegReg(src
, dest
);
2151 m_assembler
.stsmacl(dest
);
2152 m_assembler
.cmppz(dest
);
2153 m_assembler
.movt(scrsign
);
2154 m_assembler
.addlImm8r(-1, scrsign
);
2155 m_assembler
.stsmach(msbres
);
2156 m_assembler
.cmplRegReg(msbres
, scrsign
, SH4Condition(Equal
));
2157 releaseScratch(msbres
);
2158 releaseScratch(scrsign
);
2159 return branchFalse();
2164 if (cond
== Signed
) {
2165 m_assembler
.cmppz(dest
);
2166 return branchFalse();
2169 compare32(0, dest
, static_cast<RelationalCondition
>(cond
));
2170 return (cond
== NonZero
) ? branchFalse() : branchTrue();
2173 Jump
branchMul32(ResultCondition cond
, RegisterID src1
, RegisterID src2
, RegisterID dest
)
2175 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
2177 if (cond
== Overflow
) {
2178 RegisterID scrsign
= claimScratch();
2179 RegisterID msbres
= claimScratch();
2180 m_assembler
.dmulslRegReg(src1
, src2
);
2181 m_assembler
.stsmacl(dest
);
2182 m_assembler
.cmppz(dest
);
2183 m_assembler
.movt(scrsign
);
2184 m_assembler
.addlImm8r(-1, scrsign
);
2185 m_assembler
.stsmach(msbres
);
2186 m_assembler
.cmplRegReg(msbres
, scrsign
, SH4Condition(Equal
));
2187 releaseScratch(msbres
);
2188 releaseScratch(scrsign
);
2189 return branchFalse();
2192 mul32(src1
, src2
, dest
);
2194 if (cond
== Signed
) {
2195 m_assembler
.cmppz(dest
);
2196 return branchFalse();
2199 compare32(0, dest
, Equal
);
2200 return (cond
== NonZero
) ? branchFalse() : branchTrue();
2203 Jump
branchMul32(ResultCondition cond
, TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
2205 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
2208 move(imm
, scratchReg3
);
2209 return branchMul32(cond
, scratchReg3
, dest
);
2213 return branchMul32(cond
, src
, dest
);
2216 Jump
branchSub32(ResultCondition cond
, RegisterID src
, RegisterID dest
)
2218 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
2220 if (cond
== Overflow
) {
2221 m_assembler
.subvlRegReg(src
, dest
);
2222 return branchTrue();
2227 if (cond
== Signed
) {
2228 m_assembler
.cmppz(dest
);
2229 return branchFalse();
2232 compare32(0, dest
, static_cast<RelationalCondition
>(cond
));
2233 return (cond
== NonZero
) ? branchFalse() : branchTrue();
2236 Jump
branchSub32(ResultCondition cond
, TrustedImm32 imm
, RegisterID dest
)
2238 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
2240 RegisterID immval
= claimScratch();
2242 Jump result
= branchSub32(cond
, immval
, dest
);
2243 releaseScratch(immval
);
2247 Jump
branchSub32(ResultCondition cond
, RegisterID src
, TrustedImm32 imm
, RegisterID dest
)
2249 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
2252 return branchSub32(cond
, imm
, dest
);
2255 Jump
branchSub32(ResultCondition cond
, RegisterID src1
, RegisterID src2
, RegisterID dest
)
2257 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
2261 return branchSub32(cond
, src2
, dest
);
2264 if (cond
== Overflow
) {
2265 RegisterID tmpval
= claimScratch();
2267 m_assembler
.subvlRegReg(src2
, tmpval
);
2269 releaseScratch(tmpval
);
2270 return branchTrue();
2273 RegisterID tmpval
= claimScratch();
2275 sub32(src2
, tmpval
);
2277 releaseScratch(tmpval
);
2279 if (cond
== Signed
) {
2280 m_assembler
.cmppz(dest
);
2281 return branchFalse();
2284 compare32(0, dest
, static_cast<RelationalCondition
>(cond
));
2285 return (cond
== NonZero
) ? branchFalse() : branchTrue();
2288 Jump
branchOr32(ResultCondition cond
, RegisterID src
, RegisterID dest
)
2290 ASSERT((cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
2294 if (cond
== Signed
) {
2295 m_assembler
.cmppz(dest
);
2296 return branchFalse();
2299 compare32(0, dest
, static_cast<RelationalCondition
>(cond
));
2300 return (cond
== NonZero
) ? branchFalse() : branchTrue();
2303 void branchConvertDoubleToInt32(FPRegisterID src
, RegisterID dest
, JumpList
& failureCases
, FPRegisterID
, bool negZeroCheck
= true)
2305 truncateDoubleToInt32(src
, dest
);
2306 convertInt32ToDouble(dest
, fscratch
);
2307 failureCases
.append(branchDouble(DoubleNotEqualOrUnordered
, fscratch
, src
));
2310 failureCases
.append(branch32(Equal
, dest
, TrustedImm32(0)));
2313 void neg32(RegisterID dst
)
2315 m_assembler
.neg(dst
, dst
);
2318 void urshift32(RegisterID shiftamount
, RegisterID dest
)
2320 RegisterID shiftTmp
= claimScratch();
2321 m_assembler
.loadConstant(0x1f, shiftTmp
);
2322 m_assembler
.andlRegReg(shiftamount
, shiftTmp
);
2323 m_assembler
.neg(shiftTmp
, shiftTmp
);
2324 m_assembler
.shldRegReg(dest
, shiftTmp
);
2325 releaseScratch(shiftTmp
);
2328 void urshift32(RegisterID src
, RegisterID shiftAmount
, RegisterID dest
)
2331 urshift32(shiftAmount
, dest
);
2334 void urshift32(TrustedImm32 imm
, RegisterID dest
)
2336 int immMasked
= imm
.m_value
& 0x1f;
2340 if ((immMasked
== 1) || (immMasked
== 2) || (immMasked
== 8) || (immMasked
== 16)) {
2341 m_assembler
.shlrImm8r(immMasked
, dest
);
2345 RegisterID shiftTmp
= claimScratch();
2346 m_assembler
.loadConstant(-immMasked
, shiftTmp
);
2347 m_assembler
.shldRegReg(dest
, shiftTmp
);
2348 releaseScratch(shiftTmp
);
2351 void urshift32(RegisterID src
, TrustedImm32 shiftamount
, RegisterID dest
)
2354 urshift32(shiftamount
, dest
);
2359 return Call(m_assembler
.call(), Call::Linkable
);
2364 return Call(m_assembler
.call(), Call::LinkableNear
);
2367 Call
call(RegisterID target
)
2369 return Call(m_assembler
.call(target
), Call::None
);
2372 void call(Address address
)
2374 RegisterID target
= claimScratch();
2375 load32(address
.base
, address
.offset
, target
);
2376 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 2);
2377 m_assembler
.branch(JSR_OPCODE
, target
);
2379 releaseScratch(target
);
2384 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 2);
2389 Jump
branchPtrWithPatch(RelationalCondition cond
, RegisterID left
, DataLabelPtr
& dataLabel
, TrustedImmPtr initialRightValue
= TrustedImmPtr(0))
2391 RegisterID dataTempRegister
= claimScratch();
2393 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 10, 2 * sizeof(uint32_t));
2394 dataLabel
= moveWithPatch(initialRightValue
, dataTempRegister
);
2395 m_assembler
.cmplRegReg(dataTempRegister
, left
, SH4Condition(cond
));
2396 releaseScratch(dataTempRegister
);
2398 if (cond
== NotEqual
)
2399 return branchFalse();
2400 return branchTrue();
2403 Jump
branchPtrWithPatch(RelationalCondition cond
, Address left
, DataLabelPtr
& dataLabel
, TrustedImmPtr initialRightValue
= TrustedImmPtr(0))
2405 RegisterID scr
= claimScratch();
2407 m_assembler
.loadConstant(left
.offset
, scr
);
2408 m_assembler
.addlRegReg(left
.base
, scr
);
2409 m_assembler
.movlMemReg(scr
, scr
);
2410 RegisterID scr1
= claimScratch();
2411 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 10, 2 * sizeof(uint32_t));
2412 dataLabel
= moveWithPatch(initialRightValue
, scr1
);
2413 m_assembler
.cmplRegReg(scr1
, scr
, SH4Condition(cond
));
2414 releaseScratch(scr
);
2415 releaseScratch(scr1
);
2417 if (cond
== NotEqual
)
2418 return branchFalse();
2419 return branchTrue();
2428 DataLabelPtr
storePtrWithPatch(TrustedImmPtr initialValue
, ImplicitAddress address
)
2430 RegisterID scr
= claimScratch();
2431 DataLabelPtr label
= moveWithPatch(initialValue
, scr
);
2432 store32(scr
, address
);
2433 releaseScratch(scr
);
2437 DataLabelPtr
storePtrWithPatch(ImplicitAddress address
) { return storePtrWithPatch(TrustedImmPtr(0), address
); }
2439 int sizeOfConstantPool()
2441 return m_assembler
.sizeOfConstantPool();
2444 Call
tailRecursiveCall()
2446 RegisterID scr
= claimScratch();
2448 m_assembler
.loadConstantUnReusable(0x0, scr
, true);
2449 Jump m_jump
= Jump(m_assembler
.jmp(scr
));
2450 releaseScratch(scr
);
2452 return Call::fromTailJump(m_jump
);
2455 Call
makeTailRecursiveCall(Jump oldJump
)
2458 return tailRecursiveCall();
2468 m_assembler
.synco();
2471 static FunctionPtr
readCallTarget(CodeLocationCall call
)
2473 return FunctionPtr(reinterpret_cast<void(*)()>(SH4Assembler::readCallTarget(call
.dataLocation())));
2476 static void replaceWithJump(CodeLocationLabel instructionStart
, CodeLocationLabel destination
)
2478 SH4Assembler::replaceWithJump(instructionStart
.dataLocation(), destination
.dataLocation());
2481 static ptrdiff_t maxJumpReplacementSize()
2483 return SH4Assembler::maxJumpReplacementSize();
// SH4 does not support jump-replacing a patchable branch-ptr on an address.
static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
2488 static CodeLocationLabel
startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label
)
2490 return label
.labelAtOffset(0);
2493 static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart
, RegisterID rd
, void* initialValue
)
2495 SH4Assembler::revertJumpReplacementToBranchPtrWithPatch(instructionStart
.dataLocation(), rd
, reinterpret_cast<int>(initialValue
));
2498 static CodeLocationLabel
startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr
)
2500 UNREACHABLE_FOR_PLATFORM();
2501 return CodeLocationLabel();
2504 static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel
, Address
, void*)
2506 UNREACHABLE_FOR_PLATFORM();
2510 SH4Assembler::Condition
SH4Condition(RelationalCondition cond
)
2512 return static_cast<SH4Assembler::Condition
>(cond
);
2515 SH4Assembler::Condition
SH4Condition(ResultCondition cond
)
2517 return static_cast<SH4Assembler::Condition
>(cond
);
2520 friend class LinkBuffer
;
2521 friend class RepatchBuffer
;
2523 static void linkCall(void* code
, Call call
, FunctionPtr function
)
2525 SH4Assembler::linkCall(code
, call
.m_label
, function
.value());
2528 static void repatchCall(CodeLocationCall call
, CodeLocationLabel destination
)
2530 SH4Assembler::relinkCall(call
.dataLocation(), destination
.executableAddress());
2533 static void repatchCall(CodeLocationCall call
, FunctionPtr destination
)
2535 SH4Assembler::relinkCall(call
.dataLocation(), destination
.executableAddress());
2541 #endif // ENABLE(ASSEMBLER)
2543 #endif // MacroAssemblerSH4_h