2 * Copyright (C) 2013 Cisco Systems, Inc. All rights reserved.
3 * Copyright (C) 2009-2011 STMicroelectronics. All rights reserved.
4 * Copyright (C) 2008, 2014 Apple Inc. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
16 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
19 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
20 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
23 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #ifndef MacroAssemblerSH4_h
29 #define MacroAssemblerSH4_h
31 #if ENABLE(ASSEMBLER) && CPU(SH4)
33 #include "SH4Assembler.h"
34 #include "AbstractMacroAssembler.h"
35 #include <wtf/Assertions.h>
39 class MacroAssemblerSH4
: public AbstractMacroAssembler
<SH4Assembler
, MacroAssemblerSH4
> {
41 typedef SH4Assembler::FPRegisterID FPRegisterID
;
43 static const Scale ScalePtr
= TimesFour
;
44 static const FPRegisterID fscratch
= SH4Registers::dr10
;
45 static const RegisterID stackPointerRegister
= SH4Registers::sp
;
46 static const RegisterID framePointerRegister
= SH4Registers::fp
;
47 static const RegisterID linkRegister
= SH4Registers::pr
;
48 static const RegisterID scratchReg3
= SH4Registers::r13
;
50 static const int MaximumCompactPtrAlignedAddressOffset
= 60;
52 static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value
)
54 return (value
>= 0) && (value
<= MaximumCompactPtrAlignedAddressOffset
) && (!(value
& 3));
57 enum RelationalCondition
{
58 Equal
= SH4Assembler::EQ
,
59 NotEqual
= SH4Assembler::NE
,
60 Above
= SH4Assembler::HI
,
61 AboveOrEqual
= SH4Assembler::HS
,
62 Below
= SH4Assembler::LI
,
63 BelowOrEqual
= SH4Assembler::LS
,
64 GreaterThan
= SH4Assembler::GT
,
65 GreaterThanOrEqual
= SH4Assembler::GE
,
66 LessThan
= SH4Assembler::LT
,
67 LessThanOrEqual
= SH4Assembler::LE
70 enum ResultCondition
{
71 Overflow
= SH4Assembler::OF
,
72 Signed
= SH4Assembler::SI
,
73 PositiveOrZero
= SH4Assembler::NS
,
74 Zero
= SH4Assembler::EQ
,
75 NonZero
= SH4Assembler::NE
78 enum DoubleCondition
{
79 // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
80 DoubleEqual
= SH4Assembler::EQ
,
81 DoubleNotEqual
= SH4Assembler::NE
,
82 DoubleGreaterThan
= SH4Assembler::GT
,
83 DoubleGreaterThanOrEqual
= SH4Assembler::GE
,
84 DoubleLessThan
= SH4Assembler::LT
,
85 DoubleLessThanOrEqual
= SH4Assembler::LE
,
86 // If either operand is NaN, these conditions always evaluate to true.
87 DoubleEqualOrUnordered
= SH4Assembler::EQU
,
88 DoubleNotEqualOrUnordered
= SH4Assembler::NEU
,
89 DoubleGreaterThanOrUnordered
= SH4Assembler::GTU
,
90 DoubleGreaterThanOrEqualOrUnordered
= SH4Assembler::GEU
,
91 DoubleLessThanOrUnordered
= SH4Assembler::LTU
,
92 DoubleLessThanOrEqualOrUnordered
= SH4Assembler::LEU
,
95 RegisterID
claimScratch()
97 return m_assembler
.claimScratch();
100 void releaseScratch(RegisterID reg
)
102 m_assembler
.releaseScratch(reg
);
105 static RelationalCondition
invert(RelationalCondition cond
)
121 return LessThanOrEqual
;
122 case GreaterThanOrEqual
:
125 return GreaterThanOrEqual
;
126 case LessThanOrEqual
:
129 RELEASE_ASSERT_NOT_REACHED();
133 // Integer arithmetic operations
135 void add32(RegisterID src
, RegisterID dest
)
137 m_assembler
.addlRegReg(src
, dest
);
140 void add32(RegisterID src1
, RegisterID src2
, RegisterID dest
)
150 void add32(TrustedImm32 imm
, RegisterID dest
)
155 if (m_assembler
.isImmediate(imm
.m_value
)) {
156 m_assembler
.addlImm8r(imm
.m_value
, dest
);
160 RegisterID scr
= claimScratch();
161 m_assembler
.loadConstant(imm
.m_value
, scr
);
162 m_assembler
.addlRegReg(scr
, dest
);
166 void add32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
172 void add32(TrustedImm32 imm
, Address address
)
177 RegisterID scr
= claimScratch();
178 load32(address
, scr
);
180 store32(scr
, address
);
184 void add32(Address src
, RegisterID dest
)
186 RegisterID scr
= claimScratch();
188 m_assembler
.addlRegReg(scr
, dest
);
192 void add32(AbsoluteAddress src
, RegisterID dest
)
194 RegisterID scr
= claimScratch();
195 load32(src
.m_ptr
, scr
);
196 m_assembler
.addlRegReg(scr
, dest
);
200 void and32(RegisterID src
, RegisterID dest
)
202 m_assembler
.andlRegReg(src
, dest
);
205 void and32(RegisterID src1
, RegisterID src2
, RegisterID dest
)
215 void and32(Address src
, RegisterID dest
)
217 RegisterID scr
= claimScratch();
223 void and32(TrustedImm32 imm
, RegisterID dest
)
226 m_assembler
.movImm8(0, dest
);
230 if ((imm
.m_value
<= 255) && (imm
.m_value
>= 0) && (dest
== SH4Registers::r0
)) {
231 m_assembler
.andlImm8r(imm
.m_value
, dest
);
235 RegisterID scr
= claimScratch();
236 m_assembler
.loadConstant(imm
.m_value
, scr
);
237 m_assembler
.andlRegReg(scr
, dest
);
241 void and32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
252 void lshift32(RegisterID shiftamount
, RegisterID dest
)
254 RegisterID shiftTmp
= claimScratch();
255 m_assembler
.loadConstant(0x1f, shiftTmp
);
256 m_assembler
.andlRegReg(shiftamount
, shiftTmp
);
257 m_assembler
.shldRegReg(dest
, shiftTmp
);
258 releaseScratch(shiftTmp
);
261 void lshift32(RegisterID src
, RegisterID shiftAmount
, RegisterID dest
)
264 lshift32(shiftAmount
, dest
);
267 void lshift32(TrustedImm32 imm
, RegisterID dest
)
269 int immMasked
= imm
.m_value
& 0x1f;
273 if ((immMasked
== 1) || (immMasked
== 2) || (immMasked
== 8) || (immMasked
== 16)) {
274 m_assembler
.shllImm8r(immMasked
, dest
);
278 RegisterID shiftTmp
= claimScratch();
279 m_assembler
.loadConstant(immMasked
, shiftTmp
);
280 m_assembler
.shldRegReg(dest
, shiftTmp
);
281 releaseScratch(shiftTmp
);
284 void lshift32(RegisterID src
, TrustedImm32 shiftamount
, RegisterID dest
)
287 lshift32(shiftamount
, dest
);
290 void mul32(RegisterID src
, RegisterID dest
)
292 mul32(src
, dest
, dest
);
295 void mul32(RegisterID src1
, RegisterID src2
, RegisterID dest
)
297 m_assembler
.imullRegReg(src1
, src2
);
298 m_assembler
.stsmacl(dest
);
301 void mul32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
304 RegisterID immval
= claimScratch();
307 releaseScratch(immval
);
314 void or32(RegisterID src
, RegisterID dest
)
316 m_assembler
.orlRegReg(src
, dest
);
319 void or32(TrustedImm32 imm
, RegisterID dest
)
321 if ((imm
.m_value
<= 255) && (imm
.m_value
>= 0) && (dest
== SH4Registers::r0
)) {
322 m_assembler
.orlImm8r(imm
.m_value
, dest
);
326 RegisterID scr
= claimScratch();
327 m_assembler
.loadConstant(imm
.m_value
, scr
);
328 m_assembler
.orlRegReg(scr
, dest
);
332 void or32(RegisterID op1
, RegisterID op2
, RegisterID dest
)
336 else if (op1
== dest
)
344 void or32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
355 void or32(RegisterID src
, AbsoluteAddress address
)
357 RegisterID destptr
= claimScratch();
358 move(TrustedImmPtr(address
.m_ptr
), destptr
);
359 RegisterID destval
= claimScratch();
360 m_assembler
.movlMemReg(destptr
, destval
);
361 m_assembler
.orlRegReg(src
, destval
);
362 m_assembler
.movlRegMem(destval
, destptr
);
363 releaseScratch(destval
);
364 releaseScratch(destptr
);
367 void xor32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
378 void rshift32(RegisterID shiftamount
, RegisterID dest
)
380 RegisterID shiftTmp
= claimScratch();
381 m_assembler
.loadConstant(0x1f, shiftTmp
);
382 m_assembler
.andlRegReg(shiftamount
, shiftTmp
);
383 m_assembler
.neg(shiftTmp
, shiftTmp
);
384 m_assembler
.shadRegReg(dest
, shiftTmp
);
385 releaseScratch(shiftTmp
);
388 void rshift32(RegisterID src
, RegisterID shiftAmount
, RegisterID dest
)
391 rshift32(shiftAmount
, dest
);
394 void rshift32(TrustedImm32 imm
, RegisterID dest
)
396 int immMasked
= imm
.m_value
& 0x1f;
400 if (immMasked
== 1) {
401 m_assembler
.sharImm8r(immMasked
, dest
);
405 RegisterID shiftTmp
= claimScratch();
406 m_assembler
.loadConstant(-immMasked
, shiftTmp
);
407 m_assembler
.shadRegReg(dest
, shiftTmp
);
408 releaseScratch(shiftTmp
);
411 void rshift32(RegisterID src
, TrustedImm32 imm
, RegisterID dest
)
417 void sub32(RegisterID src
, RegisterID dest
)
419 m_assembler
.sublRegReg(src
, dest
);
422 void sub32(TrustedImm32 imm
, AbsoluteAddress address
)
427 RegisterID result
= claimScratch();
428 RegisterID scratchReg
= claimScratch();
430 move(TrustedImmPtr(address
.m_ptr
), scratchReg
);
431 m_assembler
.movlMemReg(scratchReg
, result
);
433 if (m_assembler
.isImmediate(-imm
.m_value
))
434 m_assembler
.addlImm8r(-imm
.m_value
, result
);
436 m_assembler
.loadConstant(imm
.m_value
, scratchReg3
);
437 m_assembler
.sublRegReg(scratchReg3
, result
);
440 store32(result
, scratchReg
);
441 releaseScratch(result
);
442 releaseScratch(scratchReg
);
445 void sub32(TrustedImm32 imm
, Address address
)
447 add32(TrustedImm32(-imm
.m_value
), address
);
450 void add32(TrustedImm32 imm
, AbsoluteAddress address
)
455 RegisterID result
= claimScratch();
456 RegisterID scratchReg
= claimScratch();
458 move(TrustedImmPtr(address
.m_ptr
), scratchReg
);
459 m_assembler
.movlMemReg(scratchReg
, result
);
461 if (m_assembler
.isImmediate(imm
.m_value
))
462 m_assembler
.addlImm8r(imm
.m_value
, result
);
464 m_assembler
.loadConstant(imm
.m_value
, scratchReg3
);
465 m_assembler
.addlRegReg(scratchReg3
, result
);
468 store32(result
, scratchReg
);
469 releaseScratch(result
);
470 releaseScratch(scratchReg
);
473 void add64(TrustedImm32 imm
, AbsoluteAddress address
)
475 RegisterID scr1
= claimScratch();
476 RegisterID scr2
= claimScratch();
478 // Add 32-bit LSB first.
479 move(TrustedImmPtr(address
.m_ptr
), scratchReg3
);
480 m_assembler
.movlMemReg(scratchReg3
, scr1
); // scr1 = 32-bit LSB of int64 @ address
481 m_assembler
.loadConstant(imm
.m_value
, scr2
);
483 m_assembler
.addclRegReg(scr1
, scr2
);
484 m_assembler
.movlRegMem(scr2
, scratchReg3
); // Update address with 32-bit LSB result.
486 // Then add 32-bit MSB.
487 m_assembler
.addlImm8r(4, scratchReg3
);
488 m_assembler
.movlMemReg(scratchReg3
, scr1
); // scr1 = 32-bit MSB of int64 @ address
489 m_assembler
.movt(scr2
);
491 m_assembler
.addlImm8r(-1, scr2
); // Sign extend imm value if needed.
492 m_assembler
.addvlRegReg(scr2
, scr1
);
493 m_assembler
.movlRegMem(scr1
, scratchReg3
); // Update (address + 4) with 32-bit MSB result.
495 releaseScratch(scr2
);
496 releaseScratch(scr1
);
499 void sub32(TrustedImm32 imm
, RegisterID dest
)
504 if (m_assembler
.isImmediate(-imm
.m_value
)) {
505 m_assembler
.addlImm8r(-imm
.m_value
, dest
);
509 RegisterID scr
= claimScratch();
510 m_assembler
.loadConstant(imm
.m_value
, scr
);
511 m_assembler
.sublRegReg(scr
, dest
);
515 void sub32(Address src
, RegisterID dest
)
517 RegisterID scr
= claimScratch();
519 m_assembler
.sublRegReg(scr
, dest
);
523 void xor32(RegisterID src
, RegisterID dest
)
525 m_assembler
.xorlRegReg(src
, dest
);
528 void xor32(RegisterID src1
, RegisterID src2
, RegisterID dest
)
538 void xor32(TrustedImm32 imm
, RegisterID srcDest
)
540 if (imm
.m_value
== -1) {
541 m_assembler
.notlReg(srcDest
, srcDest
);
545 if ((srcDest
!= SH4Registers::r0
) || (imm
.m_value
> 255) || (imm
.m_value
< 0)) {
546 RegisterID scr
= claimScratch();
547 m_assembler
.loadConstant(imm
.m_value
, scr
);
548 m_assembler
.xorlRegReg(scr
, srcDest
);
553 m_assembler
.xorlImm8r(imm
.m_value
, srcDest
);
556 void compare32(int imm
, RegisterID dst
, RelationalCondition cond
)
558 if (((cond
== Equal
) || (cond
== NotEqual
)) && (dst
== SH4Registers::r0
) && m_assembler
.isImmediate(imm
)) {
559 m_assembler
.cmpEqImmR0(imm
, dst
);
563 if (((cond
== Equal
) || (cond
== NotEqual
)) && !imm
) {
564 m_assembler
.testlRegReg(dst
, dst
);
568 RegisterID scr
= claimScratch();
569 m_assembler
.loadConstant(imm
, scr
);
570 m_assembler
.cmplRegReg(scr
, dst
, SH4Condition(cond
));
574 void compare32(int offset
, RegisterID base
, RegisterID left
, RelationalCondition cond
)
576 RegisterID scr
= claimScratch();
578 m_assembler
.movlMemReg(base
, scr
);
579 m_assembler
.cmplRegReg(scr
, left
, SH4Condition(cond
));
584 if ((offset
< 0) || (offset
>= 64)) {
585 m_assembler
.loadConstant(offset
, scr
);
586 m_assembler
.addlRegReg(base
, scr
);
587 m_assembler
.movlMemReg(scr
, scr
);
588 m_assembler
.cmplRegReg(scr
, left
, SH4Condition(cond
));
593 m_assembler
.movlMemReg(offset
>> 2, base
, scr
);
594 m_assembler
.cmplRegReg(scr
, left
, SH4Condition(cond
));
598 void testImm(int imm
, int offset
, RegisterID base
)
600 RegisterID scr
= claimScratch();
601 load32(base
, offset
, scr
);
603 RegisterID scr1
= claimScratch();
604 move(TrustedImm32(imm
), scr1
);
606 m_assembler
.testlRegReg(scr
, scr1
);
608 releaseScratch(scr1
);
611 void testlImm(int imm
, RegisterID dst
)
613 if ((dst
== SH4Registers::r0
) && (imm
<= 255) && (imm
>= 0)) {
614 m_assembler
.testlImm8r(imm
, dst
);
618 RegisterID scr
= claimScratch();
619 m_assembler
.loadConstant(imm
, scr
);
620 m_assembler
.testlRegReg(scr
, dst
);
624 void compare32(RegisterID right
, int offset
, RegisterID base
, RelationalCondition cond
)
627 RegisterID scr
= claimScratch();
628 m_assembler
.movlMemReg(base
, scr
);
629 m_assembler
.cmplRegReg(right
, scr
, SH4Condition(cond
));
634 if ((offset
< 0) || (offset
>= 64)) {
635 RegisterID scr
= claimScratch();
636 m_assembler
.loadConstant(offset
, scr
);
637 m_assembler
.addlRegReg(base
, scr
);
638 m_assembler
.movlMemReg(scr
, scr
);
639 m_assembler
.cmplRegReg(right
, scr
, SH4Condition(cond
));
644 RegisterID scr
= claimScratch();
645 m_assembler
.movlMemReg(offset
>> 2, base
, scr
);
646 m_assembler
.cmplRegReg(right
, scr
, SH4Condition(cond
));
650 void compare32(int imm
, int offset
, RegisterID base
, RelationalCondition cond
)
652 RegisterID scr
= claimScratch();
653 load32(base
, offset
, scr
);
655 RegisterID scr1
= claimScratch();
656 move(TrustedImm32(imm
), scr1
);
658 m_assembler
.cmplRegReg(scr1
, scr
, SH4Condition(cond
));
660 releaseScratch(scr1
);
664 // Memory access operation
666 ALWAYS_INLINE
void loadEffectiveAddress(BaseIndex address
, RegisterID dest
, int extraoffset
= 0)
668 if (dest
== address
.base
) {
669 RegisterID scaledIndex
= claimScratch();
670 move(address
.index
, scaledIndex
);
671 lshift32(TrustedImm32(address
.scale
), scaledIndex
);
672 add32(scaledIndex
, dest
);
673 releaseScratch(scaledIndex
);
675 move(address
.index
, dest
);
676 lshift32(TrustedImm32(address
.scale
), dest
);
677 add32(address
.base
, dest
);
680 add32(TrustedImm32(address
.offset
+ extraoffset
), dest
);
683 void load32(ImplicitAddress address
, RegisterID dest
)
685 load32(address
.base
, address
.offset
, dest
);
688 void load8(ImplicitAddress address
, RegisterID dest
)
690 load8(address
.base
, address
.offset
, dest
);
693 void load8(BaseIndex address
, RegisterID dest
)
695 RegisterID scr
= claimScratch();
696 move(address
.index
, scr
);
697 lshift32(TrustedImm32(address
.scale
), scr
);
698 add32(address
.base
, scr
);
699 load8(scr
, address
.offset
, dest
);
703 void load8(AbsoluteAddress address
, RegisterID dest
)
705 move(TrustedImmPtr(address
.m_ptr
), dest
);
706 m_assembler
.movbMemReg(dest
, dest
);
707 m_assembler
.extub(dest
, dest
);
710 void load8(const void* address
, RegisterID dest
)
712 load8(AbsoluteAddress(address
), dest
);
715 void load8PostInc(RegisterID base
, RegisterID dest
)
717 m_assembler
.movbMemRegIn(base
, dest
);
718 m_assembler
.extub(dest
, dest
);
721 void load8SignedExtendTo32(BaseIndex address
, RegisterID dest
)
723 RegisterID scr
= claimScratch();
724 move(address
.index
, scr
);
725 lshift32(TrustedImm32(address
.scale
), scr
);
726 add32(address
.base
, scr
);
727 load8SignedExtendTo32(scr
, address
.offset
, dest
);
731 void load32(BaseIndex address
, RegisterID dest
)
733 RegisterID scr
= claimScratch();
734 move(address
.index
, scr
);
735 lshift32(TrustedImm32(address
.scale
), scr
);
736 add32(address
.base
, scr
);
737 load32(scr
, address
.offset
, dest
);
741 void load32(const void* address
, RegisterID dest
)
743 move(TrustedImmPtr(address
), dest
);
744 m_assembler
.movlMemReg(dest
, dest
);
747 void load32(RegisterID base
, int offset
, RegisterID dest
)
750 m_assembler
.movlMemReg(base
, dest
);
754 if ((offset
>= 0) && (offset
< 64)) {
755 m_assembler
.movlMemReg(offset
>> 2, base
, dest
);
759 RegisterID scr
= (dest
== base
) ? claimScratch() : dest
;
761 m_assembler
.loadConstant(offset
, scr
);
762 if (base
== SH4Registers::r0
)
763 m_assembler
.movlR0mr(scr
, dest
);
765 m_assembler
.addlRegReg(base
, scr
);
766 m_assembler
.movlMemReg(scr
, dest
);
773 void load8SignedExtendTo32(RegisterID base
, int offset
, RegisterID dest
)
776 m_assembler
.movbMemReg(base
, dest
);
780 if ((offset
> 0) && (offset
<= 15) && (dest
== SH4Registers::r0
)) {
781 m_assembler
.movbMemReg(offset
, base
, dest
);
785 RegisterID scr
= (dest
== base
) ? claimScratch() : dest
;
787 m_assembler
.loadConstant(offset
, scr
);
788 if (base
== SH4Registers::r0
)
789 m_assembler
.movbR0mr(scr
, dest
);
791 m_assembler
.addlRegReg(base
, scr
);
792 m_assembler
.movbMemReg(scr
, dest
);
799 void load8(RegisterID base
, int offset
, RegisterID dest
)
801 load8SignedExtendTo32(base
, offset
, dest
);
802 m_assembler
.extub(dest
, dest
);
805 void load32(RegisterID src
, RegisterID dst
)
807 m_assembler
.movlMemReg(src
, dst
);
810 void load16(ImplicitAddress address
, RegisterID dest
)
812 if (!address
.offset
) {
813 m_assembler
.movwMemReg(address
.base
, dest
);
814 m_assembler
.extuw(dest
, dest
);
818 if ((address
.offset
> 0) && (address
.offset
<= 30) && (dest
== SH4Registers::r0
)) {
819 m_assembler
.movwMemReg(address
.offset
>> 1, address
.base
, dest
);
820 m_assembler
.extuw(dest
, dest
);
824 RegisterID scr
= (dest
== address
.base
) ? claimScratch() : dest
;
826 m_assembler
.loadConstant(address
.offset
, scr
);
827 if (address
.base
== SH4Registers::r0
)
828 m_assembler
.movwR0mr(scr
, dest
);
830 m_assembler
.addlRegReg(address
.base
, scr
);
831 m_assembler
.movwMemReg(scr
, dest
);
833 m_assembler
.extuw(dest
, dest
);
835 if (dest
== address
.base
)
839 void load16Unaligned(BaseIndex address
, RegisterID dest
)
841 RegisterID scr
= claimScratch();
843 loadEffectiveAddress(address
, scr
);
845 RegisterID scr1
= claimScratch();
846 load8PostInc(scr
, scr1
);
848 m_assembler
.shllImm8r(8, dest
);
852 releaseScratch(scr1
);
855 void load16(RegisterID src
, RegisterID dest
)
857 m_assembler
.movwMemReg(src
, dest
);
858 m_assembler
.extuw(dest
, dest
);
861 void load16SignedExtendTo32(RegisterID src
, RegisterID dest
)
863 m_assembler
.movwMemReg(src
, dest
);
866 void load16(BaseIndex address
, RegisterID dest
)
868 load16SignedExtendTo32(address
, dest
);
869 m_assembler
.extuw(dest
, dest
);
872 void load16PostInc(RegisterID base
, RegisterID dest
)
874 m_assembler
.movwMemRegIn(base
, dest
);
875 m_assembler
.extuw(dest
, dest
);
878 void load16SignedExtendTo32(BaseIndex address
, RegisterID dest
)
880 RegisterID scr
= claimScratch();
882 move(address
.index
, scr
);
883 lshift32(TrustedImm32(address
.scale
), scr
);
884 add32(TrustedImm32(address
.offset
), scr
);
886 if (address
.base
== SH4Registers::r0
)
887 m_assembler
.movwR0mr(scr
, dest
);
889 add32(address
.base
, scr
);
890 load16SignedExtendTo32(scr
, dest
);
896 void store8(RegisterID src
, BaseIndex address
)
898 RegisterID scr
= claimScratch();
900 move(address
.index
, scr
);
901 lshift32(TrustedImm32(address
.scale
), scr
);
902 add32(TrustedImm32(address
.offset
), scr
);
904 if (address
.base
== SH4Registers::r0
)
905 m_assembler
.movbRegMemr0(src
, scr
);
907 add32(address
.base
, scr
);
908 m_assembler
.movbRegMem(src
, scr
);
914 void store8(RegisterID src
, void* address
)
916 RegisterID destptr
= claimScratch();
917 move(TrustedImmPtr(address
), destptr
);
918 m_assembler
.movbRegMem(src
, destptr
);
919 releaseScratch(destptr
);
922 void store8(TrustedImm32 imm
, void* address
)
924 ASSERT((imm
.m_value
>= -128) && (imm
.m_value
<= 127));
925 RegisterID dstptr
= claimScratch();
926 move(TrustedImmPtr(address
), dstptr
);
927 RegisterID srcval
= claimScratch();
929 m_assembler
.movbRegMem(srcval
, dstptr
);
930 releaseScratch(dstptr
);
931 releaseScratch(srcval
);
934 void store8(TrustedImm32 imm
, Address address
)
936 ASSERT((imm
.m_value
>= -128) && (imm
.m_value
<= 127));
937 RegisterID dstptr
= claimScratch();
938 move(address
.base
, dstptr
);
939 add32(TrustedImm32(address
.offset
), dstptr
);
940 RegisterID srcval
= claimScratch();
942 m_assembler
.movbRegMem(srcval
, dstptr
);
943 releaseScratch(dstptr
);
944 releaseScratch(srcval
);
947 void store16(RegisterID src
, BaseIndex address
)
949 RegisterID scr
= claimScratch();
951 move(address
.index
, scr
);
952 lshift32(TrustedImm32(address
.scale
), scr
);
953 add32(TrustedImm32(address
.offset
), scr
);
955 if (address
.base
== SH4Registers::r0
)
956 m_assembler
.movwRegMemr0(src
, scr
);
958 add32(address
.base
, scr
);
959 m_assembler
.movwRegMem(src
, scr
);
965 void store32(RegisterID src
, ImplicitAddress address
)
967 if (!address
.offset
) {
968 m_assembler
.movlRegMem(src
, address
.base
);
972 if ((address
.offset
>= 0) && (address
.offset
< 64)) {
973 m_assembler
.movlRegMem(src
, address
.offset
>> 2, address
.base
);
977 RegisterID scr
= claimScratch();
978 m_assembler
.loadConstant(address
.offset
, scr
);
979 if (address
.base
== SH4Registers::r0
)
980 m_assembler
.movlRegMemr0(src
, scr
);
982 m_assembler
.addlRegReg(address
.base
, scr
);
983 m_assembler
.movlRegMem(src
, scr
);
988 void store32(RegisterID src
, RegisterID dst
)
990 m_assembler
.movlRegMem(src
, dst
);
993 void store32(TrustedImm32 imm
, ImplicitAddress address
)
995 RegisterID scr
= claimScratch();
996 m_assembler
.loadConstant(imm
.m_value
, scr
);
997 store32(scr
, address
);
1001 void store32(RegisterID src
, BaseIndex address
)
1003 RegisterID scr
= claimScratch();
1005 move(address
.index
, scr
);
1006 lshift32(TrustedImm32(address
.scale
), scr
);
1007 add32(address
.base
, scr
);
1008 store32(src
, Address(scr
, address
.offset
));
1010 releaseScratch(scr
);
1013 void store32(TrustedImm32 imm
, void* address
)
1015 RegisterID scr
= claimScratch();
1016 RegisterID scr1
= claimScratch();
1017 m_assembler
.loadConstant(imm
.m_value
, scr
);
1018 move(TrustedImmPtr(address
), scr1
);
1019 m_assembler
.movlRegMem(scr
, scr1
);
1020 releaseScratch(scr
);
1021 releaseScratch(scr1
);
1024 void store32(RegisterID src
, void* address
)
1026 RegisterID scr
= claimScratch();
1027 move(TrustedImmPtr(address
), scr
);
1028 m_assembler
.movlRegMem(src
, scr
);
1029 releaseScratch(scr
);
1032 void store32(TrustedImm32 imm
, BaseIndex address
)
1034 RegisterID destptr
= claimScratch();
1036 loadEffectiveAddress(address
, destptr
);
1038 RegisterID srcval
= claimScratch();
1040 m_assembler
.movlRegMem(srcval
, destptr
);
1041 releaseScratch(srcval
);
1042 releaseScratch(destptr
);
1045 DataLabel32
load32WithAddressOffsetPatch(Address address
, RegisterID dest
)
1047 RegisterID scr
= claimScratch();
1048 DataLabel32
label(this);
1049 m_assembler
.loadConstantUnReusable(address
.offset
, scr
);
1050 m_assembler
.addlRegReg(address
.base
, scr
);
1051 m_assembler
.movlMemReg(scr
, dest
);
1052 releaseScratch(scr
);
1056 DataLabel32
store32WithAddressOffsetPatch(RegisterID src
, Address address
)
1058 RegisterID scr
= claimScratch();
1059 DataLabel32
label(this);
1060 m_assembler
.loadConstantUnReusable(address
.offset
, scr
);
1061 m_assembler
.addlRegReg(address
.base
, scr
);
1062 m_assembler
.movlRegMem(src
, scr
);
1063 releaseScratch(scr
);
1067 DataLabelCompact
load32WithCompactAddressOffsetPatch(Address address
, RegisterID dest
)
1069 DataLabelCompact
dataLabel(this);
1070 ASSERT(isCompactPtrAlignedAddressOffset(address
.offset
));
1071 m_assembler
.movlMemRegCompact(address
.offset
>> 2, address
.base
, dest
);
1075 ConvertibleLoadLabel
convertibleLoadPtr(Address address
, RegisterID dest
)
1077 ConvertibleLoadLabel
result(this);
1079 RegisterID scr
= claimScratch();
1080 m_assembler
.movImm8(address
.offset
, scr
);
1081 m_assembler
.addlRegReg(address
.base
, scr
);
1082 m_assembler
.movlMemReg(scr
, dest
);
1083 releaseScratch(scr
);
1088 // Floating-point operations
1090 static bool supportsFloatingPoint() { return true; }
1091 static bool supportsFloatingPointTruncate() { return true; }
1092 static bool supportsFloatingPointSqrt() { return true; }
1093 static bool supportsFloatingPointAbs() { return true; }
1095 void moveDoubleToInts(FPRegisterID src
, RegisterID dest1
, RegisterID dest2
)
1097 m_assembler
.fldsfpul((FPRegisterID
)(src
+ 1));
1098 m_assembler
.stsfpulReg(dest1
);
1099 m_assembler
.fldsfpul(src
);
1100 m_assembler
.stsfpulReg(dest2
);
1103 void moveIntsToDouble(RegisterID src1
, RegisterID src2
, FPRegisterID dest
, FPRegisterID
)
1105 m_assembler
.ldsrmfpul(src1
);
1106 m_assembler
.fstsfpul((FPRegisterID
)(dest
+ 1));
1107 m_assembler
.ldsrmfpul(src2
);
1108 m_assembler
.fstsfpul(dest
);
1111 void moveDouble(FPRegisterID src
, FPRegisterID dest
)
1114 m_assembler
.fmovsRegReg((FPRegisterID
)(src
+ 1), (FPRegisterID
)(dest
+ 1));
1115 m_assembler
.fmovsRegReg(src
, dest
);
1119 void swapDouble(FPRegisterID fr1
, FPRegisterID fr2
)
1122 m_assembler
.fldsfpul((FPRegisterID
)(fr1
+ 1));
1123 m_assembler
.fmovsRegReg((FPRegisterID
)(fr2
+ 1), (FPRegisterID
)(fr1
+ 1));
1124 m_assembler
.fstsfpul((FPRegisterID
)(fr2
+ 1));
1125 m_assembler
.fldsfpul(fr1
);
1126 m_assembler
.fmovsRegReg(fr2
, fr1
);
1127 m_assembler
.fstsfpul(fr2
);
1131 void loadFloat(BaseIndex address
, FPRegisterID dest
)
1133 RegisterID scr
= claimScratch();
1135 loadEffectiveAddress(address
, scr
);
1137 m_assembler
.fmovsReadrm(scr
, dest
);
1138 releaseScratch(scr
);
1141 void loadDouble(BaseIndex address
, FPRegisterID dest
)
1143 RegisterID scr
= claimScratch();
1145 loadEffectiveAddress(address
, scr
);
1147 m_assembler
.fmovsReadrminc(scr
, (FPRegisterID
)(dest
+ 1));
1148 m_assembler
.fmovsReadrm(scr
, dest
);
1149 releaseScratch(scr
);
1152 void loadDouble(ImplicitAddress address
, FPRegisterID dest
)
1154 RegisterID scr
= claimScratch();
1156 m_assembler
.loadConstant(address
.offset
, scr
);
1157 if (address
.base
== SH4Registers::r0
) {
1158 m_assembler
.fmovsReadr0r(scr
, (FPRegisterID
)(dest
+ 1));
1159 m_assembler
.addlImm8r(4, scr
);
1160 m_assembler
.fmovsReadr0r(scr
, dest
);
1161 releaseScratch(scr
);
1165 m_assembler
.addlRegReg(address
.base
, scr
);
1166 m_assembler
.fmovsReadrminc(scr
, (FPRegisterID
)(dest
+ 1));
1167 m_assembler
.fmovsReadrm(scr
, dest
);
1168 releaseScratch(scr
);
1171 void loadDouble(TrustedImmPtr address
, FPRegisterID dest
)
1173 RegisterID scr
= claimScratch();
1175 m_assembler
.fmovsReadrminc(scr
, (FPRegisterID
)(dest
+ 1));
1176 m_assembler
.fmovsReadrm(scr
, dest
);
1177 releaseScratch(scr
);
1180 void storeFloat(FPRegisterID src
, BaseIndex address
)
1182 RegisterID scr
= claimScratch();
1183 loadEffectiveAddress(address
, scr
);
1184 m_assembler
.fmovsWriterm(src
, scr
);
1185 releaseScratch(scr
);
1188 void storeDouble(FPRegisterID src
, ImplicitAddress address
)
1190 RegisterID scr
= claimScratch();
1191 m_assembler
.loadConstant(address
.offset
+ 8, scr
);
1192 m_assembler
.addlRegReg(address
.base
, scr
);
1193 m_assembler
.fmovsWriterndec(src
, scr
);
1194 m_assembler
.fmovsWriterndec((FPRegisterID
)(src
+ 1), scr
);
1195 releaseScratch(scr
);
1198 void storeDouble(FPRegisterID src
, BaseIndex address
)
1200 RegisterID scr
= claimScratch();
1202 loadEffectiveAddress(address
, scr
, 8);
1204 m_assembler
.fmovsWriterndec(src
, scr
);
1205 m_assembler
.fmovsWriterndec((FPRegisterID
)(src
+ 1), scr
);
1207 releaseScratch(scr
);
1210 void addDouble(FPRegisterID op1
, FPRegisterID op2
, FPRegisterID dest
)
1213 addDouble(op2
, dest
);
1215 moveDouble(op2
, dest
);
1216 addDouble(op1
, dest
);
1220 void storeDouble(FPRegisterID src
, TrustedImmPtr address
)
1222 RegisterID scr
= claimScratch();
1223 m_assembler
.loadConstant(reinterpret_cast<uint32_t>(const_cast<void*>(address
.m_value
)) + 8, scr
);
1224 m_assembler
.fmovsWriterndec(src
, scr
);
1225 m_assembler
.fmovsWriterndec((FPRegisterID
)(src
+ 1), scr
);
1226 releaseScratch(scr
);
1229 void addDouble(FPRegisterID src
, FPRegisterID dest
)
1231 m_assembler
.daddRegReg(src
, dest
);
1234 void addDouble(AbsoluteAddress address
, FPRegisterID dest
)
1236 loadDouble(TrustedImmPtr(address
.m_ptr
), fscratch
);
1237 addDouble(fscratch
, dest
);
1240 void addDouble(Address address
, FPRegisterID dest
)
1242 loadDouble(address
, fscratch
);
1243 addDouble(fscratch
, dest
);
1246 void subDouble(FPRegisterID src
, FPRegisterID dest
)
1248 m_assembler
.dsubRegReg(src
, dest
);
1251 void subDouble(FPRegisterID op1
, FPRegisterID op2
, FPRegisterID dest
)
1254 moveDouble(op1
, fscratch
);
1255 subDouble(op2
, fscratch
);
1256 moveDouble(fscratch
, dest
);
1258 moveDouble(op1
, dest
);
1259 subDouble(op2
, dest
);
1263 void subDouble(Address address
, FPRegisterID dest
)
1265 loadDouble(address
, fscratch
);
1266 subDouble(fscratch
, dest
);
1269 void mulDouble(FPRegisterID src
, FPRegisterID dest
)
1271 m_assembler
.dmulRegReg(src
, dest
);
1274 void mulDouble(FPRegisterID op1
, FPRegisterID op2
, FPRegisterID dest
)
1277 mulDouble(op2
, dest
);
1279 moveDouble(op2
, dest
);
1280 mulDouble(op1
, dest
);
1284 void mulDouble(Address address
, FPRegisterID dest
)
1286 loadDouble(address
, fscratch
);
1287 mulDouble(fscratch
, dest
);
1290 void divDouble(FPRegisterID src
, FPRegisterID dest
)
1292 m_assembler
.ddivRegReg(src
, dest
);
1295 void divDouble(FPRegisterID op1
, FPRegisterID op2
, FPRegisterID dest
)
1298 moveDouble(op1
, fscratch
);
1299 divDouble(op2
, fscratch
);
1300 moveDouble(fscratch
, dest
);
1302 moveDouble(op1
, dest
);
1303 divDouble(op2
, dest
);
1307 void negateDouble(FPRegisterID src
, FPRegisterID dest
)
1309 moveDouble(src
, dest
);
1310 m_assembler
.dneg(dest
);
1313 void convertFloatToDouble(FPRegisterID src
, FPRegisterID dst
)
1315 m_assembler
.fldsfpul(src
);
1316 m_assembler
.dcnvsd(dst
);
1319 void convertDoubleToFloat(FPRegisterID src
, FPRegisterID dst
)
1321 m_assembler
.dcnvds(src
);
1322 m_assembler
.fstsfpul(dst
);
1325 void convertInt32ToDouble(RegisterID src
, FPRegisterID dest
)
1327 m_assembler
.ldsrmfpul(src
);
1328 m_assembler
.floatfpulDreg(dest
);
1331 void convertInt32ToDouble(AbsoluteAddress src
, FPRegisterID dest
)
1333 RegisterID scr
= claimScratch();
1334 load32(src
.m_ptr
, scr
);
1335 convertInt32ToDouble(scr
, dest
);
1336 releaseScratch(scr
);
1339 void convertInt32ToDouble(Address src
, FPRegisterID dest
)
1341 RegisterID scr
= claimScratch();
1343 convertInt32ToDouble(scr
, dest
);
1344 releaseScratch(scr
);
1347 void load32WithUnalignedHalfWords(BaseIndex address
, RegisterID dest
)
1349 RegisterID scr
= claimScratch();
1353 loadEffectiveAddress(address
, scr
);
1355 RegisterID scr1
= claimScratch();
1356 if (dest
!= SH4Registers::r0
)
1357 move(SH4Registers::r0
, scr1
);
1359 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 58, sizeof(uint32_t));
1360 move(scr
, SH4Registers::r0
);
1361 m_assembler
.testlImm8r(0x3, SH4Registers::r0
);
1362 m_jump
= Jump(m_assembler
.jne(), SH4Assembler::JumpNear
);
1364 if (dest
!= SH4Registers::r0
)
1365 move(scr1
, SH4Registers::r0
);
1368 end
.append(Jump(m_assembler
.bra(), SH4Assembler::JumpNear
));
1371 m_assembler
.testlImm8r(0x1, SH4Registers::r0
);
1373 if (dest
!= SH4Registers::r0
)
1374 move(scr1
, SH4Registers::r0
);
1376 m_jump
= Jump(m_assembler
.jne(), SH4Assembler::JumpNear
);
1377 load16PostInc(scr
, scr1
);
1379 m_assembler
.shllImm8r(16, dest
);
1381 end
.append(Jump(m_assembler
.bra(), SH4Assembler::JumpNear
));
1384 load8PostInc(scr
, scr1
);
1385 load16PostInc(scr
, dest
);
1386 m_assembler
.shllImm8r(8, dest
);
1389 m_assembler
.shllImm8r(8, dest
);
1390 m_assembler
.shllImm8r(16, dest
);
1394 releaseScratch(scr
);
1395 releaseScratch(scr1
);
1398 Jump
branch32WithUnalignedHalfWords(RelationalCondition cond
, BaseIndex left
, TrustedImm32 right
)
1400 RegisterID scr
= scratchReg3
;
1401 load32WithUnalignedHalfWords(left
, scr
);
1402 if (((cond
== Equal
) || (cond
== NotEqual
)) && !right
.m_value
)
1403 m_assembler
.testlRegReg(scr
, scr
);
1405 compare32(right
.m_value
, scr
, cond
);
1407 if (cond
== NotEqual
)
1408 return branchFalse();
1409 return branchTrue();
1412 Jump
branchDoubleNonZero(FPRegisterID reg
, FPRegisterID scratch
)
1414 m_assembler
.movImm8(0, scratchReg3
);
1415 convertInt32ToDouble(scratchReg3
, scratch
);
1416 return branchDouble(DoubleNotEqual
, reg
, scratch
);
1419 Jump
branchDoubleZeroOrNaN(FPRegisterID reg
, FPRegisterID scratch
)
1421 m_assembler
.movImm8(0, scratchReg3
);
1422 convertInt32ToDouble(scratchReg3
, scratch
);
1423 return branchDouble(DoubleEqualOrUnordered
, reg
, scratch
);
1426 Jump
branchDouble(DoubleCondition cond
, FPRegisterID left
, FPRegisterID right
)
1428 if (cond
== DoubleEqual
) {
1429 m_assembler
.dcmppeq(right
, left
);
1430 return branchTrue();
1433 if (cond
== DoubleNotEqual
) {
1435 m_assembler
.dcmppeq(left
, left
);
1436 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 22, sizeof(uint32_t));
1437 end
.append(Jump(m_assembler
.jne(), SH4Assembler::JumpNear
));
1438 m_assembler
.dcmppeq(right
, right
);
1439 end
.append(Jump(m_assembler
.jne(), SH4Assembler::JumpNear
));
1440 m_assembler
.dcmppeq(right
, left
);
1441 Jump m_jump
= branchFalse();
1446 if (cond
== DoubleGreaterThan
) {
1447 m_assembler
.dcmppgt(right
, left
);
1448 return branchTrue();
1451 if (cond
== DoubleGreaterThanOrEqual
) {
1453 m_assembler
.dcmppeq(left
, left
);
1454 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 22, sizeof(uint32_t));
1455 end
.append(Jump(m_assembler
.jne(), SH4Assembler::JumpNear
));
1456 m_assembler
.dcmppeq(right
, right
);
1457 end
.append(Jump(m_assembler
.jne(), SH4Assembler::JumpNear
));
1458 m_assembler
.dcmppgt(left
, right
);
1459 Jump m_jump
= branchFalse();
1464 if (cond
== DoubleLessThan
) {
1465 m_assembler
.dcmppgt(left
, right
);
1466 return branchTrue();
1469 if (cond
== DoubleLessThanOrEqual
) {
1471 m_assembler
.dcmppeq(left
, left
);
1472 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 22, sizeof(uint32_t));
1473 end
.append(Jump(m_assembler
.jne(), SH4Assembler::JumpNear
));
1474 m_assembler
.dcmppeq(right
, right
);
1475 end
.append(Jump(m_assembler
.jne(), SH4Assembler::JumpNear
));
1476 m_assembler
.dcmppgt(right
, left
);
1477 Jump m_jump
= branchFalse();
1482 if (cond
== DoubleEqualOrUnordered
) {
1483 JumpList takeBranch
;
1484 m_assembler
.dcmppeq(left
, left
);
1485 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 22, sizeof(uint32_t));
1486 takeBranch
.append(Jump(m_assembler
.jne(), SH4Assembler::JumpNear
));
1487 m_assembler
.dcmppeq(right
, right
);
1488 takeBranch
.append(Jump(m_assembler
.jne(), SH4Assembler::JumpNear
));
1489 m_assembler
.dcmppeq(left
, right
);
1490 m_assembler
.branch(BF_OPCODE
, 2);
1491 takeBranch
.link(this);
1492 return Jump(m_assembler
.extraInstrForBranch(scratchReg3
));
1495 if (cond
== DoubleGreaterThanOrUnordered
) {
1496 JumpList takeBranch
;
1497 m_assembler
.dcmppeq(left
, left
);
1498 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 22, sizeof(uint32_t));
1499 takeBranch
.append(Jump(m_assembler
.jne(), SH4Assembler::JumpNear
));
1500 m_assembler
.dcmppeq(right
, right
);
1501 takeBranch
.append(Jump(m_assembler
.jne(), SH4Assembler::JumpNear
));
1502 m_assembler
.dcmppgt(right
, left
);
1503 m_assembler
.branch(BF_OPCODE
, 2);
1504 takeBranch
.link(this);
1505 return Jump(m_assembler
.extraInstrForBranch(scratchReg3
));
1508 if (cond
== DoubleGreaterThanOrEqualOrUnordered
) {
1509 m_assembler
.dcmppgt(left
, right
);
1510 return branchFalse();
1513 if (cond
== DoubleLessThanOrUnordered
) {
1514 JumpList takeBranch
;
1515 m_assembler
.dcmppeq(left
, left
);
1516 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 22, sizeof(uint32_t));
1517 takeBranch
.append(Jump(m_assembler
.jne(), SH4Assembler::JumpNear
));
1518 m_assembler
.dcmppeq(right
, right
);
1519 takeBranch
.append(Jump(m_assembler
.jne(), SH4Assembler::JumpNear
));
1520 m_assembler
.dcmppgt(left
, right
);
1521 m_assembler
.branch(BF_OPCODE
, 2);
1522 takeBranch
.link(this);
1523 return Jump(m_assembler
.extraInstrForBranch(scratchReg3
));
1526 if (cond
== DoubleLessThanOrEqualOrUnordered
) {
1527 m_assembler
.dcmppgt(right
, left
);
1528 return branchFalse();
1531 ASSERT(cond
== DoubleNotEqualOrUnordered
);
1532 m_assembler
.dcmppeq(right
, left
);
1533 return branchFalse();
1538 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 6, sizeof(uint32_t));
1539 m_assembler
.branch(BF_OPCODE
, 2);
1540 return Jump(m_assembler
.extraInstrForBranch(scratchReg3
));
1545 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 6, sizeof(uint32_t));
1546 m_assembler
.branch(BT_OPCODE
, 2);
1547 return Jump(m_assembler
.extraInstrForBranch(scratchReg3
));
1550 Jump
branch32(RelationalCondition cond
, BaseIndex left
, TrustedImm32 right
)
1552 RegisterID scr
= claimScratch();
1553 move(left
.index
, scr
);
1554 lshift32(TrustedImm32(left
.scale
), scr
);
1555 add32(left
.base
, scr
);
1556 load32(scr
, left
.offset
, scr
);
1557 compare32(right
.m_value
, scr
, cond
);
1558 releaseScratch(scr
);
1560 if (cond
== NotEqual
)
1561 return branchFalse();
1562 return branchTrue();
1565 void sqrtDouble(FPRegisterID src
, FPRegisterID dest
)
1567 moveDouble(src
, dest
);
1568 m_assembler
.dsqrt(dest
);
1571 void absDouble(FPRegisterID src
, FPRegisterID dest
)
1573 moveDouble(src
, dest
);
1574 m_assembler
.dabs(dest
);
1577 Jump
branchTest8(ResultCondition cond
, Address address
, TrustedImm32 mask
= TrustedImm32(-1))
1579 RegisterID addressTempRegister
= claimScratch();
1580 load8(address
, addressTempRegister
);
1581 Jump jmp
= branchTest32(cond
, addressTempRegister
, mask
);
1582 releaseScratch(addressTempRegister
);
1586 Jump
branchTest8(ResultCondition cond
, BaseIndex address
, TrustedImm32 mask
= TrustedImm32(-1))
1588 RegisterID addressTempRegister
= claimScratch();
1589 load8(address
, addressTempRegister
);
1590 Jump jmp
= branchTest32(cond
, addressTempRegister
, mask
);
1591 releaseScratch(addressTempRegister
);
1595 Jump
branchTest8(ResultCondition cond
, AbsoluteAddress address
, TrustedImm32 mask
= TrustedImm32(-1))
1597 RegisterID addressTempRegister
= claimScratch();
1598 move(TrustedImmPtr(address
.m_ptr
), addressTempRegister
);
1599 load8(Address(addressTempRegister
), addressTempRegister
);
1600 Jump jmp
= branchTest32(cond
, addressTempRegister
, mask
);
1601 releaseScratch(addressTempRegister
);
1605 void signExtend32ToPtr(RegisterID src
, RegisterID dest
)
1610 void zeroExtend32ToPtr(RegisterID src
, RegisterID dest
)
1615 Jump
branch8(RelationalCondition cond
, Address left
, TrustedImm32 right
)
1617 RegisterID addressTempRegister
= claimScratch();
1618 load8(left
, addressTempRegister
);
1619 Jump jmp
= branch32(cond
, addressTempRegister
, right
);
1620 releaseScratch(addressTempRegister
);
1624 Jump
branch8(RelationalCondition cond
, AbsoluteAddress left
, TrustedImm32 right
)
1626 RegisterID addressTempRegister
= claimScratch();
1627 load8(left
, addressTempRegister
);
1628 Jump jmp
= branch32(cond
, addressTempRegister
, right
);
1629 releaseScratch(addressTempRegister
);
1633 void compare8(RelationalCondition cond
, Address left
, TrustedImm32 right
, RegisterID dest
)
1635 RegisterID addressTempRegister
= claimScratch();
1636 load8(left
, addressTempRegister
);
1637 compare32(cond
, addressTempRegister
, right
, dest
);
1638 releaseScratch(addressTempRegister
);
1641 enum BranchTruncateType
{ BranchIfTruncateFailed
, BranchIfTruncateSuccessful
};
1642 Jump
branchTruncateDoubleToInt32(FPRegisterID src
, RegisterID dest
, BranchTruncateType branchType
= BranchIfTruncateFailed
)
1645 truncateDoubleToInt32(src
, dest
);
1646 RegisterID intscr
= claimScratch();
1647 m_assembler
.loadConstant(0x7fffffff, intscr
);
1648 m_assembler
.cmplRegReg(dest
, intscr
, SH4Condition(Equal
));
1649 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 12, sizeof(uint32_t));
1650 if (branchType
== BranchIfTruncateFailed
) {
1651 m_assembler
.branch(BT_OPCODE
, 2);
1652 m_assembler
.addlImm8r(1, intscr
);
1653 m_assembler
.cmplRegReg(dest
, intscr
, SH4Condition(Equal
));
1654 result
= branchTrue();
1656 Jump out
= Jump(m_assembler
.je(), SH4Assembler::JumpNear
);
1657 m_assembler
.addlImm8r(1, intscr
);
1658 m_assembler
.cmplRegReg(dest
, intscr
, SH4Condition(Equal
));
1659 result
= branchFalse();
1662 releaseScratch(intscr
);
1666 Jump
branchTruncateDoubleToUint32(FPRegisterID src
, RegisterID dest
, BranchTruncateType branchType
= BranchIfTruncateFailed
)
1669 RegisterID intscr
= claimScratch();
1670 m_assembler
.loadConstant(0x80000000, intscr
);
1671 convertInt32ToDouble(intscr
, fscratch
);
1672 addDouble(src
, fscratch
);
1673 truncateDoubleToInt32(fscratch
, dest
);
1674 m_assembler
.cmplRegReg(dest
, intscr
, SH4Condition(Equal
));
1675 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 16, sizeof(uint32_t));
1676 if (branchType
== BranchIfTruncateFailed
) {
1677 m_assembler
.branch(BT_OPCODE
, 4);
1678 m_assembler
.addlImm8r(-1, intscr
);
1679 m_assembler
.cmplRegReg(dest
, intscr
, SH4Condition(Equal
));
1680 m_assembler
.addlImm8r(1, intscr
);
1681 m_assembler
.sublRegReg(intscr
, dest
);
1682 result
= branchTrue();
1684 Jump out
= Jump(m_assembler
.je(), SH4Assembler::JumpNear
);
1685 m_assembler
.addlImm8r(-1, intscr
);
1686 m_assembler
.cmplRegReg(dest
, intscr
, SH4Condition(Equal
));
1687 m_assembler
.addlImm8r(1, intscr
);
1688 m_assembler
.sublRegReg(intscr
, dest
);
1689 result
= branchFalse();
1692 releaseScratch(intscr
);
1696 void truncateDoubleToInt32(FPRegisterID src
, RegisterID dest
)
1698 m_assembler
.ftrcdrmfpul(src
);
1699 m_assembler
.stsfpulReg(dest
);
1702 void truncateDoubleToUint32(FPRegisterID src
, RegisterID dest
)
1704 RegisterID intscr
= claimScratch();
1705 m_assembler
.loadConstant(0x80000000, intscr
);
1706 convertInt32ToDouble(intscr
, fscratch
);
1707 addDouble(src
, fscratch
);
1708 m_assembler
.ftrcdrmfpul(fscratch
);
1709 m_assembler
.stsfpulReg(dest
);
1710 m_assembler
.sublRegReg(intscr
, dest
);
1711 releaseScratch(intscr
);
1714 // Stack manipulation operations
1716 void pop(RegisterID dest
)
1718 m_assembler
.popReg(dest
);
1721 void push(RegisterID src
)
1723 m_assembler
.pushReg(src
);
1726 void push(TrustedImm32 imm
)
1728 RegisterID scr
= claimScratch();
1729 m_assembler
.loadConstant(imm
.m_value
, scr
);
1731 releaseScratch(scr
);
1734 // Register move operations
1736 void move(TrustedImm32 imm
, RegisterID dest
)
1738 m_assembler
.loadConstant(imm
.m_value
, dest
);
1741 DataLabelPtr
moveWithPatch(TrustedImmPtr initialValue
, RegisterID dest
)
1743 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
, sizeof(uint32_t));
1744 DataLabelPtr
dataLabel(this);
1745 m_assembler
.loadConstantUnReusable(reinterpret_cast<uint32_t>(initialValue
.m_value
), dest
);
1749 DataLabel32
moveWithPatch(TrustedImm32 initialValue
, RegisterID dest
)
1751 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
, sizeof(uint32_t));
1752 DataLabel32
dataLabel(this);
1753 m_assembler
.loadConstantUnReusable(static_cast<uint32_t>(initialValue
.m_value
), dest
);
1757 void move(RegisterID src
, RegisterID dest
)
1760 m_assembler
.movlRegReg(src
, dest
);
1763 void move(TrustedImmPtr imm
, RegisterID dest
)
1765 m_assembler
.loadConstant(imm
.asIntptr(), dest
);
1768 void swap(RegisterID reg1
, RegisterID reg2
)
1777 void compare32(RelationalCondition cond
, RegisterID left
, RegisterID right
, RegisterID dest
)
1779 m_assembler
.cmplRegReg(right
, left
, SH4Condition(cond
));
1780 if (cond
!= NotEqual
) {
1781 m_assembler
.movt(dest
);
1785 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 4);
1786 m_assembler
.movImm8(0, dest
);
1787 m_assembler
.branch(BT_OPCODE
, 0);
1788 m_assembler
.movImm8(1, dest
);
1791 void compare32(RelationalCondition cond
, RegisterID left
, TrustedImm32 right
, RegisterID dest
)
1795 compare32(cond
, left
, dest
, dest
);
1799 RegisterID scr
= claimScratch();
1801 compare32(cond
, left
, scr
, dest
);
1802 releaseScratch(scr
);
1805 void test8(ResultCondition cond
, Address address
, TrustedImm32 mask
, RegisterID dest
)
1807 ASSERT((cond
== Zero
) || (cond
== NonZero
));
1809 load8(address
, dest
);
1810 if (mask
.m_value
== -1)
1811 compare32(0, dest
, static_cast<RelationalCondition
>(cond
));
1813 testlImm(mask
.m_value
, dest
);
1814 if (cond
!= NonZero
) {
1815 m_assembler
.movt(dest
);
1819 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 4);
1820 m_assembler
.movImm8(0, dest
);
1821 m_assembler
.branch(BT_OPCODE
, 0);
1822 m_assembler
.movImm8(1, dest
);
1825 void test32(ResultCondition cond
, Address address
, TrustedImm32 mask
, RegisterID dest
)
1827 ASSERT((cond
== Zero
) || (cond
== NonZero
));
1829 load32(address
, dest
);
1830 if (mask
.m_value
== -1)
1831 compare32(0, dest
, static_cast<RelationalCondition
>(cond
));
1833 testlImm(mask
.m_value
, dest
);
1834 if (cond
!= NonZero
) {
1835 m_assembler
.movt(dest
);
1839 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 4);
1840 m_assembler
.movImm8(0, dest
);
1841 m_assembler
.branch(BT_OPCODE
, 0);
1842 m_assembler
.movImm8(1, dest
);
1845 void loadPtrLinkReg(ImplicitAddress address
)
1847 RegisterID scr
= claimScratch();
1848 load32(address
, scr
);
1849 m_assembler
.ldspr(scr
);
1850 releaseScratch(scr
);
1853 Jump
branch32(RelationalCondition cond
, RegisterID left
, RegisterID right
)
1855 m_assembler
.cmplRegReg(right
, left
, SH4Condition(cond
));
1856 /* BT label => BF off
1861 if (cond
== NotEqual
)
1862 return branchFalse();
1863 return branchTrue();
1866 Jump
branch32(RelationalCondition cond
, RegisterID left
, TrustedImm32 right
)
1868 if (((cond
== Equal
) || (cond
== NotEqual
)) && !right
.m_value
)
1869 m_assembler
.testlRegReg(left
, left
);
1871 compare32(right
.m_value
, left
, cond
);
1873 if (cond
== NotEqual
)
1874 return branchFalse();
1875 return branchTrue();
1878 Jump
branch32(RelationalCondition cond
, RegisterID left
, Address right
)
1880 compare32(right
.offset
, right
.base
, left
, cond
);
1881 if (cond
== NotEqual
)
1882 return branchFalse();
1883 return branchTrue();
1886 Jump
branch32(RelationalCondition cond
, Address left
, RegisterID right
)
1888 compare32(right
, left
.offset
, left
.base
, cond
);
1889 if (cond
== NotEqual
)
1890 return branchFalse();
1891 return branchTrue();
1894 Jump
branch32(RelationalCondition cond
, Address left
, TrustedImm32 right
)
1896 compare32(right
.m_value
, left
.offset
, left
.base
, cond
);
1897 if (cond
== NotEqual
)
1898 return branchFalse();
1899 return branchTrue();
1902 Jump
branch32(RelationalCondition cond
, AbsoluteAddress left
, RegisterID right
)
1904 RegisterID scr
= claimScratch();
1906 load32(left
.m_ptr
, scr
);
1907 m_assembler
.cmplRegReg(right
, scr
, SH4Condition(cond
));
1908 releaseScratch(scr
);
1910 if (cond
== NotEqual
)
1911 return branchFalse();
1912 return branchTrue();
1915 Jump
branch32(RelationalCondition cond
, AbsoluteAddress left
, TrustedImm32 right
)
1917 RegisterID addressTempRegister
= claimScratch();
1919 move(TrustedImmPtr(left
.m_ptr
), addressTempRegister
);
1920 m_assembler
.movlMemReg(addressTempRegister
, addressTempRegister
);
1921 compare32(right
.m_value
, addressTempRegister
, cond
);
1922 releaseScratch(addressTempRegister
);
1924 if (cond
== NotEqual
)
1925 return branchFalse();
1926 return branchTrue();
1929 Jump
branch8(RelationalCondition cond
, BaseIndex left
, TrustedImm32 right
)
1931 ASSERT(!(right
.m_value
& 0xFFFFFF00));
1932 RegisterID lefttmp
= claimScratch();
1934 loadEffectiveAddress(left
, lefttmp
);
1936 load8(lefttmp
, lefttmp
);
1937 RegisterID righttmp
= claimScratch();
1938 m_assembler
.loadConstant(right
.m_value
, righttmp
);
1940 Jump result
= branch32(cond
, lefttmp
, righttmp
);
1941 releaseScratch(lefttmp
);
1942 releaseScratch(righttmp
);
1946 Jump
branchTest32(ResultCondition cond
, RegisterID reg
, RegisterID mask
)
1948 ASSERT((cond
== Zero
) || (cond
== NonZero
));
1950 m_assembler
.testlRegReg(reg
, mask
);
1952 if (cond
== NonZero
) // NotEqual
1953 return branchFalse();
1954 return branchTrue();
1957 Jump
branchTest32(ResultCondition cond
, RegisterID reg
, TrustedImm32 mask
= TrustedImm32(-1))
1959 ASSERT((cond
== Zero
) || (cond
== NonZero
));
1961 if (mask
.m_value
== -1)
1962 m_assembler
.testlRegReg(reg
, reg
);
1964 testlImm(mask
.m_value
, reg
);
1966 if (cond
== NonZero
) // NotEqual
1967 return branchFalse();
1968 return branchTrue();
1971 Jump
branchTest32(ResultCondition cond
, Address address
, TrustedImm32 mask
= TrustedImm32(-1))
1973 ASSERT((cond
== Zero
) || (cond
== NonZero
));
1975 if (mask
.m_value
== -1)
1976 compare32(0, address
.offset
, address
.base
, static_cast<RelationalCondition
>(cond
));
1978 testImm(mask
.m_value
, address
.offset
, address
.base
);
1980 if (cond
== NonZero
) // NotEqual
1981 return branchFalse();
1982 return branchTrue();
1985 Jump
branchTest32(ResultCondition cond
, BaseIndex address
, TrustedImm32 mask
= TrustedImm32(-1))
1987 ASSERT((cond
== Zero
) || (cond
== NonZero
));
1989 RegisterID scr
= claimScratch();
1991 move(address
.index
, scr
);
1992 lshift32(TrustedImm32(address
.scale
), scr
);
1993 add32(address
.base
, scr
);
1994 load32(scr
, address
.offset
, scr
);
1996 if (mask
.m_value
== -1)
1997 m_assembler
.testlRegReg(scr
, scr
);
1999 testlImm(mask
.m_value
, scr
);
2001 releaseScratch(scr
);
2003 if (cond
== NonZero
) // NotEqual
2004 return branchFalse();
2005 return branchTrue();
2010 return Jump(m_assembler
.jmp());
2013 void jump(RegisterID target
)
2015 m_assembler
.jmpReg(target
);
2018 void jump(Address address
)
2020 RegisterID scr
= claimScratch();
2021 load32(address
, scr
);
2022 m_assembler
.jmpReg(scr
);
2023 releaseScratch(scr
);
2026 void jump(AbsoluteAddress address
)
2028 RegisterID scr
= claimScratch();
2030 move(TrustedImmPtr(address
.m_ptr
), scr
);
2031 m_assembler
.movlMemReg(scr
, scr
);
2032 m_assembler
.jmpReg(scr
);
2033 releaseScratch(scr
);
2036 // Arithmetic control flow operations
2038 Jump
branchNeg32(ResultCondition cond
, RegisterID srcDest
)
2040 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
2042 if (cond
== Overflow
)
2043 return branchMul32(cond
, TrustedImm32(-1), srcDest
, srcDest
);
2047 if (cond
== Signed
) {
2048 m_assembler
.cmppz(srcDest
);
2049 return branchFalse();
2052 compare32(0, srcDest
, Equal
);
2053 return (cond
== NonZero
) ? branchFalse() : branchTrue();
2056 Jump
branchAdd32(ResultCondition cond
, RegisterID src
, RegisterID dest
)
2058 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== PositiveOrZero
) || (cond
== Zero
) || (cond
== NonZero
));
2060 if (cond
== Overflow
) {
2061 m_assembler
.addvlRegReg(src
, dest
);
2062 return branchTrue();
2065 m_assembler
.addlRegReg(src
, dest
);
2067 if ((cond
== Signed
) || (cond
== PositiveOrZero
)) {
2068 m_assembler
.cmppz(dest
);
2069 return (cond
== Signed
) ? branchFalse() : branchTrue();
2072 compare32(0, dest
, Equal
);
2073 return (cond
== NonZero
) ? branchFalse() : branchTrue();
2076 Jump
branchAdd32(ResultCondition cond
, RegisterID src1
, RegisterID src2
, RegisterID dest
)
2078 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== PositiveOrZero
) || (cond
== Zero
) || (cond
== NonZero
));
2080 if (cond
== Overflow
) {
2082 m_assembler
.addvlRegReg(src2
, dest
);
2085 m_assembler
.addvlRegReg(src1
, dest
);
2087 return branchTrue();
2090 add32(src1
, src2
, dest
);
2092 if ((cond
== Signed
) || (cond
== PositiveOrZero
)) {
2093 m_assembler
.cmppz(dest
);
2094 return (cond
== Signed
) ? branchFalse() : branchTrue();
2097 compare32(0, dest
, Equal
);
2098 return (cond
== NonZero
) ? branchFalse() : branchTrue();
2101 Jump
branchAdd32(ResultCondition cond
, TrustedImm32 imm
, RegisterID dest
)
2103 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== PositiveOrZero
) || (cond
== Zero
) || (cond
== NonZero
));
2105 RegisterID immval
= claimScratch();
2107 Jump result
= branchAdd32(cond
, immval
, dest
);
2108 releaseScratch(immval
);
2112 Jump
branchAdd32(ResultCondition cond
, RegisterID src
, TrustedImm32 imm
, RegisterID dest
)
2114 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== PositiveOrZero
) || (cond
== Zero
) || (cond
== NonZero
));
2118 if (cond
== Overflow
) {
2119 move(imm
, scratchReg3
);
2120 m_assembler
.addvlRegReg(scratchReg3
, dest
);
2121 return branchTrue();
2126 if ((cond
== Signed
) || (cond
== PositiveOrZero
)) {
2127 m_assembler
.cmppz(dest
);
2128 return (cond
== Signed
) ? branchFalse() : branchTrue();
2131 compare32(0, dest
, Equal
);
2132 return (cond
== NonZero
) ? branchFalse() : branchTrue();
2135 Jump
branchAdd32(ResultCondition cond
, TrustedImm32 imm
, AbsoluteAddress dest
)
2137 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== PositiveOrZero
) || (cond
== Zero
) || (cond
== NonZero
));
2140 move(imm
, scratchReg3
);
2141 RegisterID destptr
= claimScratch();
2142 RegisterID destval
= claimScratch();
2143 move(TrustedImmPtr(dest
.m_ptr
), destptr
);
2144 m_assembler
.movlMemReg(destptr
, destval
);
2145 if (cond
== Overflow
) {
2146 m_assembler
.addvlRegReg(scratchReg3
, destval
);
2149 m_assembler
.addlRegReg(scratchReg3
, destval
);
2150 if ((cond
== Signed
) || (cond
== PositiveOrZero
)) {
2151 m_assembler
.cmppz(destval
);
2152 result
= (cond
== PositiveOrZero
);
2154 m_assembler
.testlRegReg(destval
, destval
);
2155 result
= (cond
!= NonZero
);
2158 m_assembler
.movlRegMem(destval
, destptr
);
2159 releaseScratch(destval
);
2160 releaseScratch(destptr
);
2161 return result
? branchTrue() : branchFalse();
2164 Jump
branchAdd32(ResultCondition cond
, Address src
, RegisterID dest
)
2166 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== PositiveOrZero
) || (cond
== Zero
) || (cond
== NonZero
));
2168 if (cond
== Overflow
) {
2169 RegisterID srcVal
= claimScratch();
2170 load32(src
, srcVal
);
2171 m_assembler
.addvlRegReg(srcVal
, dest
);
2172 releaseScratch(srcVal
);
2173 return branchTrue();
2178 if ((cond
== Signed
) || (cond
== PositiveOrZero
)) {
2179 m_assembler
.cmppz(dest
);
2180 return (cond
== Signed
) ? branchFalse() : branchTrue();
2183 compare32(0, dest
, Equal
);
2184 return (cond
== NonZero
) ? branchFalse() : branchTrue();
2187 Jump
branchMul32(ResultCondition cond
, RegisterID src
, RegisterID dest
)
2189 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
2191 if (cond
== Overflow
) {
2192 RegisterID scrsign
= claimScratch();
2193 RegisterID msbres
= claimScratch();
2194 m_assembler
.dmulslRegReg(src
, dest
);
2195 m_assembler
.stsmacl(dest
);
2196 m_assembler
.cmppz(dest
);
2197 m_assembler
.movt(scrsign
);
2198 m_assembler
.addlImm8r(-1, scrsign
);
2199 m_assembler
.stsmach(msbres
);
2200 m_assembler
.cmplRegReg(msbres
, scrsign
, SH4Condition(Equal
));
2201 releaseScratch(msbres
);
2202 releaseScratch(scrsign
);
2203 return branchFalse();
2208 if (cond
== Signed
) {
2209 m_assembler
.cmppz(dest
);
2210 return branchFalse();
2213 compare32(0, dest
, static_cast<RelationalCondition
>(cond
));
2214 return (cond
== NonZero
) ? branchFalse() : branchTrue();
2217 Jump
branchMul32(ResultCondition cond
, RegisterID src1
, RegisterID src2
, RegisterID dest
)
2219 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
2221 if (cond
== Overflow
) {
2222 RegisterID scrsign
= claimScratch();
2223 RegisterID msbres
= claimScratch();
2224 m_assembler
.dmulslRegReg(src1
, src2
);
2225 m_assembler
.stsmacl(dest
);
2226 m_assembler
.cmppz(dest
);
2227 m_assembler
.movt(scrsign
);
2228 m_assembler
.addlImm8r(-1, scrsign
);
2229 m_assembler
.stsmach(msbres
);
2230 m_assembler
.cmplRegReg(msbres
, scrsign
, SH4Condition(Equal
));
2231 releaseScratch(msbres
);
2232 releaseScratch(scrsign
);
2233 return branchFalse();
2236 mul32(src1
, src2
, dest
);
2238 if (cond
== Signed
) {
2239 m_assembler
.cmppz(dest
);
2240 return branchFalse();
2243 compare32(0, dest
, Equal
);
2244 return (cond
== NonZero
) ? branchFalse() : branchTrue();
2247 Jump
branchMul32(ResultCondition cond
, TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
2249 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
2252 move(imm
, scratchReg3
);
2253 return branchMul32(cond
, scratchReg3
, dest
);
2257 return branchMul32(cond
, src
, dest
);
2260 Jump
branchSub32(ResultCondition cond
, RegisterID src
, RegisterID dest
)
2262 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
2264 if (cond
== Overflow
) {
2265 m_assembler
.subvlRegReg(src
, dest
);
2266 return branchTrue();
2271 if (cond
== Signed
) {
2272 m_assembler
.cmppz(dest
);
2273 return branchFalse();
2276 compare32(0, dest
, static_cast<RelationalCondition
>(cond
));
2277 return (cond
== NonZero
) ? branchFalse() : branchTrue();
2280 Jump
branchSub32(ResultCondition cond
, TrustedImm32 imm
, RegisterID dest
)
2282 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
2284 RegisterID immval
= claimScratch();
2286 Jump result
= branchSub32(cond
, immval
, dest
);
2287 releaseScratch(immval
);
2291 Jump
branchSub32(ResultCondition cond
, RegisterID src
, TrustedImm32 imm
, RegisterID dest
)
2293 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
2296 return branchSub32(cond
, imm
, dest
);
2299 Jump
branchSub32(ResultCondition cond
, RegisterID src1
, RegisterID src2
, RegisterID dest
)
2301 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
2305 return branchSub32(cond
, src2
, dest
);
2308 if (cond
== Overflow
) {
2309 RegisterID tmpval
= claimScratch();
2311 m_assembler
.subvlRegReg(src2
, tmpval
);
2313 releaseScratch(tmpval
);
2314 return branchTrue();
2317 RegisterID tmpval
= claimScratch();
2319 sub32(src2
, tmpval
);
2321 releaseScratch(tmpval
);
2323 if (cond
== Signed
) {
2324 m_assembler
.cmppz(dest
);
2325 return branchFalse();
2328 compare32(0, dest
, static_cast<RelationalCondition
>(cond
));
2329 return (cond
== NonZero
) ? branchFalse() : branchTrue();
2332 Jump
branchOr32(ResultCondition cond
, RegisterID src
, RegisterID dest
)
2334 ASSERT((cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
2338 if (cond
== Signed
) {
2339 m_assembler
.cmppz(dest
);
2340 return branchFalse();
2343 compare32(0, dest
, static_cast<RelationalCondition
>(cond
));
2344 return (cond
== NonZero
) ? branchFalse() : branchTrue();
2347 void branchConvertDoubleToInt32(FPRegisterID src
, RegisterID dest
, JumpList
& failureCases
, FPRegisterID
, bool negZeroCheck
= true)
2349 truncateDoubleToInt32(src
, dest
);
2350 convertInt32ToDouble(dest
, fscratch
);
2351 failureCases
.append(branchDouble(DoubleNotEqualOrUnordered
, fscratch
, src
));
2354 failureCases
.append(branch32(Equal
, dest
, TrustedImm32(0)));
2357 void neg32(RegisterID dst
)
2359 m_assembler
.neg(dst
, dst
);
2362 void urshift32(RegisterID shiftamount
, RegisterID dest
)
2364 RegisterID shiftTmp
= claimScratch();
2365 m_assembler
.loadConstant(0x1f, shiftTmp
);
2366 m_assembler
.andlRegReg(shiftamount
, shiftTmp
);
2367 m_assembler
.neg(shiftTmp
, shiftTmp
);
2368 m_assembler
.shldRegReg(dest
, shiftTmp
);
2369 releaseScratch(shiftTmp
);
2372 void urshift32(RegisterID src
, RegisterID shiftAmount
, RegisterID dest
)
2375 urshift32(shiftAmount
, dest
);
2378 void urshift32(TrustedImm32 imm
, RegisterID dest
)
2380 int immMasked
= imm
.m_value
& 0x1f;
2384 if ((immMasked
== 1) || (immMasked
== 2) || (immMasked
== 8) || (immMasked
== 16)) {
2385 m_assembler
.shlrImm8r(immMasked
, dest
);
2389 RegisterID shiftTmp
= claimScratch();
2390 m_assembler
.loadConstant(-immMasked
, shiftTmp
);
2391 m_assembler
.shldRegReg(dest
, shiftTmp
);
2392 releaseScratch(shiftTmp
);
2395 void urshift32(RegisterID src
, TrustedImm32 shiftamount
, RegisterID dest
)
2398 urshift32(shiftamount
, dest
);
2403 return Call(m_assembler
.call(), Call::Linkable
);
2408 return Call(m_assembler
.call(), Call::LinkableNear
);
2411 Call
call(RegisterID target
)
2413 return Call(m_assembler
.call(target
), Call::None
);
2416 void call(Address address
)
2418 RegisterID target
= claimScratch();
2419 load32(address
.base
, address
.offset
, target
);
2420 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 2);
2421 m_assembler
.branch(JSR_OPCODE
, target
);
2423 releaseScratch(target
);
2428 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 2);
2433 Jump
branchPtrWithPatch(RelationalCondition cond
, RegisterID left
, DataLabelPtr
& dataLabel
, TrustedImmPtr initialRightValue
= TrustedImmPtr(0))
2435 RegisterID dataTempRegister
= claimScratch();
2437 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 10, 2 * sizeof(uint32_t));
2438 dataLabel
= moveWithPatch(initialRightValue
, dataTempRegister
);
2439 m_assembler
.cmplRegReg(dataTempRegister
, left
, SH4Condition(cond
));
2440 releaseScratch(dataTempRegister
);
2442 if (cond
== NotEqual
)
2443 return branchFalse();
2444 return branchTrue();
2447 Jump
branchPtrWithPatch(RelationalCondition cond
, Address left
, DataLabelPtr
& dataLabel
, TrustedImmPtr initialRightValue
= TrustedImmPtr(0))
2449 RegisterID scr
= claimScratch();
2451 m_assembler
.loadConstant(left
.offset
, scr
);
2452 m_assembler
.addlRegReg(left
.base
, scr
);
2453 m_assembler
.movlMemReg(scr
, scr
);
2454 RegisterID scr1
= claimScratch();
2455 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 10, 2 * sizeof(uint32_t));
2456 dataLabel
= moveWithPatch(initialRightValue
, scr1
);
2457 m_assembler
.cmplRegReg(scr1
, scr
, SH4Condition(cond
));
2458 releaseScratch(scr
);
2459 releaseScratch(scr1
);
2461 if (cond
== NotEqual
)
2462 return branchFalse();
2463 return branchTrue();
2466 Jump
branch32WithPatch(RelationalCondition cond
, Address left
, DataLabel32
& dataLabel
, TrustedImm32 initialRightValue
= TrustedImm32(0))
2468 RegisterID scr
= claimScratch();
2470 m_assembler
.loadConstant(left
.offset
, scr
);
2471 m_assembler
.addlRegReg(left
.base
, scr
);
2472 m_assembler
.movlMemReg(scr
, scr
);
2473 RegisterID scr1
= claimScratch();
2474 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 10, 2 * sizeof(uint32_t));
2475 dataLabel
= moveWithPatch(initialRightValue
, scr1
);
2476 m_assembler
.cmplRegReg(scr1
, scr
, SH4Condition(cond
));
2477 releaseScratch(scr
);
2478 releaseScratch(scr1
);
2480 return (cond
== NotEqual
) ? branchFalse() : branchTrue();
2489 DataLabelPtr
storePtrWithPatch(TrustedImmPtr initialValue
, ImplicitAddress address
)
2491 RegisterID scr
= claimScratch();
2492 DataLabelPtr label
= moveWithPatch(initialValue
, scr
);
2493 store32(scr
, address
);
2494 releaseScratch(scr
);
2498 DataLabelPtr
storePtrWithPatch(ImplicitAddress address
) { return storePtrWithPatch(TrustedImmPtr(0), address
); }
2500 int sizeOfConstantPool()
2502 return m_assembler
.sizeOfConstantPool();
2505 Call
tailRecursiveCall()
2507 RegisterID scr
= claimScratch();
2509 m_assembler
.loadConstantUnReusable(0x0, scr
, true);
2510 Jump m_jump
= Jump(m_assembler
.jmp(scr
));
2511 releaseScratch(scr
);
2513 return Call::fromTailJump(m_jump
);
2516 Call
makeTailRecursiveCall(Jump oldJump
)
2519 return tailRecursiveCall();
2529 m_assembler
.synco();
2532 void abortWithReason(AbortReason reason
)
2534 move(TrustedImm32(reason
), SH4Registers::r0
);
2538 void abortWithReason(AbortReason reason
, intptr_t misc
)
2540 move(TrustedImm32(misc
), SH4Registers::r1
);
2541 abortWithReason(reason
);
2544 static FunctionPtr
readCallTarget(CodeLocationCall call
)
2546 return FunctionPtr(reinterpret_cast<void(*)()>(SH4Assembler::readCallTarget(call
.dataLocation())));
2549 static void replaceWithJump(CodeLocationLabel instructionStart
, CodeLocationLabel destination
)
2551 SH4Assembler::replaceWithJump(instructionStart
.dataLocation(), destination
.dataLocation());
2554 static ptrdiff_t maxJumpReplacementSize()
2556 return SH4Assembler::maxJumpReplacementSize();
// The SH4 port only supports jump-replacement of the register form of a
// patchable branch (see startOfBranchPtrWithPatchOnRegister); the
// address form is unsupported, so the matching startOf/revert hooks below
// are unreachable.
static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
// As with the pointer form: the address-based patchable branch32 cannot be
// jump-replaced on SH4, so its startOf/revert hooks are unreachable.
static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
2563 static CodeLocationLabel
startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label
)
2565 return label
.labelAtOffset(0);
2568 static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart
, RegisterID rd
, void* initialValue
)
2570 SH4Assembler::revertJumpReplacementToBranchPtrWithPatch(instructionStart
.dataLocation(), rd
, reinterpret_cast<int>(initialValue
));
2573 static CodeLocationLabel
startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr
)
2575 UNREACHABLE_FOR_PLATFORM();
2576 return CodeLocationLabel();
2579 static CodeLocationLabel
startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32
)
2581 UNREACHABLE_FOR_PLATFORM();
2582 return CodeLocationLabel();
2585 static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel
, Address
, void*)
2587 UNREACHABLE_FOR_PLATFORM();
2590 static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel
, Address
, int32_t)
2592 UNREACHABLE_FOR_PLATFORM();
2596 SH4Assembler::Condition
SH4Condition(RelationalCondition cond
)
2598 return static_cast<SH4Assembler::Condition
>(cond
);
2601 SH4Assembler::Condition
SH4Condition(ResultCondition cond
)
2603 return static_cast<SH4Assembler::Condition
>(cond
);
2606 friend class LinkBuffer
;
2607 friend class RepatchBuffer
;
2609 static void linkCall(void* code
, Call call
, FunctionPtr function
)
2611 SH4Assembler::linkCall(code
, call
.m_label
, function
.value());
2614 static void repatchCall(CodeLocationCall call
, CodeLocationLabel destination
)
2616 SH4Assembler::relinkCall(call
.dataLocation(), destination
.executableAddress());
2619 static void repatchCall(CodeLocationCall call
, FunctionPtr destination
)
2621 SH4Assembler::relinkCall(call
.dataLocation(), destination
.executableAddress());
#endif // ENABLE(ASSEMBLER) && CPU(SH4)
2629 #endif // MacroAssemblerSH4_h