2 * Copyright (C) 2009-2011 STMicroelectronics. All rights reserved.
3 * Copyright (C) 2008 Apple Inc. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #ifndef MacroAssemblerSH4_h
28 #define MacroAssemblerSH4_h
30 #if ENABLE(ASSEMBLER) && CPU(SH4)
32 #include "SH4Assembler.h"
33 #include "AbstractMacroAssembler.h"
34 #include <wtf/Assertions.h>
38 class MacroAssemblerSH4
: public AbstractMacroAssembler
<SH4Assembler
> {
40 typedef SH4Assembler::FPRegisterID FPRegisterID
;
42 static const Scale ScalePtr
= TimesFour
;
43 static const FPRegisterID fscratch
= SH4Registers::fr10
;
44 static const RegisterID stackPointerRegister
= SH4Registers::sp
;
45 static const RegisterID linkRegister
= SH4Registers::pr
;
46 static const RegisterID scratchReg3
= SH4Registers::r13
;
48 static const int MaximumCompactPtrAlignedAddressOffset
= 60;
50 enum RelationalCondition
{
51 Equal
= SH4Assembler::EQ
,
52 NotEqual
= SH4Assembler::NE
,
53 Above
= SH4Assembler::HI
,
54 AboveOrEqual
= SH4Assembler::HS
,
55 Below
= SH4Assembler::LI
,
56 BelowOrEqual
= SH4Assembler::LS
,
57 GreaterThan
= SH4Assembler::GT
,
58 GreaterThanOrEqual
= SH4Assembler::GE
,
59 LessThan
= SH4Assembler::LT
,
60 LessThanOrEqual
= SH4Assembler::LE
63 enum ResultCondition
{
64 Overflow
= SH4Assembler::OF
,
65 Signed
= SH4Assembler::SI
,
66 Zero
= SH4Assembler::EQ
,
67 NonZero
= SH4Assembler::NE
70 enum DoubleCondition
{
71 // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
72 DoubleEqual
= SH4Assembler::EQ
,
73 DoubleNotEqual
= SH4Assembler::NE
,
74 DoubleGreaterThan
= SH4Assembler::GT
,
75 DoubleGreaterThanOrEqual
= SH4Assembler::GE
,
76 DoubleLessThan
= SH4Assembler::LT
,
77 DoubleLessThanOrEqual
= SH4Assembler::LE
,
78 // If either operand is NaN, these conditions always evaluate to true.
79 DoubleEqualOrUnordered
= SH4Assembler::EQU
,
80 DoubleNotEqualOrUnordered
= SH4Assembler::NEU
,
81 DoubleGreaterThanOrUnordered
= SH4Assembler::GTU
,
82 DoubleGreaterThanOrEqualOrUnordered
= SH4Assembler::GEU
,
83 DoubleLessThanOrUnordered
= SH4Assembler::LTU
,
84 DoubleLessThanOrEqualOrUnordered
= SH4Assembler::LEU
,
87 RegisterID
claimScratch()
89 return m_assembler
.claimScratch();
92 void releaseScratch(RegisterID reg
)
94 m_assembler
.releaseScratch(reg
);
97 // Integer arithmetic operations
99 void add32(RegisterID src
, RegisterID dest
)
101 m_assembler
.addlRegReg(src
, dest
);
104 void add32(TrustedImm32 imm
, RegisterID dest
)
106 if (m_assembler
.isImmediate(imm
.m_value
)) {
107 m_assembler
.addlImm8r(imm
.m_value
, dest
);
111 RegisterID scr
= claimScratch();
112 m_assembler
.loadConstant(imm
.m_value
, scr
);
113 m_assembler
.addlRegReg(scr
, dest
);
117 void add32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
120 m_assembler
.movlRegReg(src
, dest
);
124 void add32(TrustedImm32 imm
, Address address
)
126 RegisterID scr
= claimScratch();
127 load32(address
, scr
);
129 store32(scr
, address
);
133 void add32(Address src
, RegisterID dest
)
135 RegisterID scr
= claimScratch();
137 m_assembler
.addlRegReg(scr
, dest
);
141 void and32(RegisterID src
, RegisterID dest
)
143 m_assembler
.andlRegReg(src
, dest
);
146 void and32(TrustedImm32 imm
, RegisterID dest
)
148 if ((imm
.m_value
<= 255) && (imm
.m_value
>= 0) && (dest
== SH4Registers::r0
)) {
149 m_assembler
.andlImm8r(imm
.m_value
, dest
);
153 RegisterID scr
= claimScratch();
154 m_assembler
.loadConstant((imm
.m_value
), scr
);
155 m_assembler
.andlRegReg(scr
, dest
);
159 void and32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
170 void lshift32(RegisterID shiftamount
, RegisterID dest
)
172 if (shiftamount
== SH4Registers::r0
)
173 m_assembler
.andlImm8r(0x1f, shiftamount
);
175 RegisterID scr
= claimScratch();
176 m_assembler
.loadConstant(0x1f, scr
);
177 m_assembler
.andlRegReg(scr
, shiftamount
);
180 m_assembler
.shllRegReg(dest
, shiftamount
);
183 void rshift32(int imm
, RegisterID dest
)
185 RegisterID scr
= claimScratch();
186 m_assembler
.loadConstant(-imm
, scr
);
187 m_assembler
.shaRegReg(dest
, scr
);
191 void lshift32(TrustedImm32 imm
, RegisterID dest
)
196 if ((imm
.m_value
== 1) || (imm
.m_value
== 2) || (imm
.m_value
== 8) || (imm
.m_value
== 16)) {
197 m_assembler
.shllImm8r(imm
.m_value
, dest
);
201 RegisterID scr
= claimScratch();
202 m_assembler
.loadConstant((imm
.m_value
& 0x1f) , scr
);
203 m_assembler
.shllRegReg(dest
, scr
);
207 void lshift32(RegisterID src
, TrustedImm32 shiftamount
, RegisterID dest
)
212 lshift32(shiftamount
, dest
);
215 void mul32(RegisterID src
, RegisterID dest
)
217 m_assembler
.imullRegReg(src
, dest
);
218 m_assembler
.stsmacl(dest
);
221 void mul32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
223 RegisterID scr
= claimScratch();
231 void or32(RegisterID src
, RegisterID dest
)
233 m_assembler
.orlRegReg(src
, dest
);
236 void or32(TrustedImm32 imm
, RegisterID dest
)
238 if ((imm
.m_value
<= 255) && (imm
.m_value
>= 0) && (dest
== SH4Registers::r0
)) {
239 m_assembler
.orlImm8r(imm
.m_value
, dest
);
243 RegisterID scr
= claimScratch();
244 m_assembler
.loadConstant(imm
.m_value
, scr
);
245 m_assembler
.orlRegReg(scr
, dest
);
249 void or32(RegisterID op1
, RegisterID op2
, RegisterID dest
)
253 else if (op1
== dest
)
262 void or32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
273 void xor32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
284 void rshift32(RegisterID shiftamount
, RegisterID dest
)
286 if (shiftamount
== SH4Registers::r0
)
287 m_assembler
.andlImm8r(0x1f, shiftamount
);
289 RegisterID scr
= claimScratch();
290 m_assembler
.loadConstant(0x1f, scr
);
291 m_assembler
.andlRegReg(scr
, shiftamount
);
294 m_assembler
.neg(shiftamount
, shiftamount
);
295 m_assembler
.shaRegReg(dest
, shiftamount
);
298 void rshift32(TrustedImm32 imm
, RegisterID dest
)
300 if (imm
.m_value
& 0x1f)
301 rshift32(imm
.m_value
& 0x1f, dest
);
304 void rshift32(RegisterID src
, TrustedImm32 imm
, RegisterID dest
)
311 void sub32(RegisterID src
, RegisterID dest
)
313 m_assembler
.sublRegReg(src
, dest
);
316 void sub32(TrustedImm32 imm
, AbsoluteAddress address
, RegisterID scratchReg
)
318 RegisterID result
= claimScratch();
320 m_assembler
.loadConstant(reinterpret_cast<uint32_t>(address
.m_ptr
), scratchReg
);
321 m_assembler
.movlMemReg(scratchReg
, result
);
323 if (m_assembler
.isImmediate(-imm
.m_value
))
324 m_assembler
.addlImm8r(-imm
.m_value
, result
);
326 m_assembler
.loadConstant(imm
.m_value
, scratchReg3
);
327 m_assembler
.sublRegReg(scratchReg3
, result
);
330 store32(result
, scratchReg
);
331 releaseScratch(result
);
334 void sub32(TrustedImm32 imm
, AbsoluteAddress address
)
336 RegisterID result
= claimScratch();
337 RegisterID scratchReg
= claimScratch();
339 m_assembler
.loadConstant(reinterpret_cast<uint32_t>(address
.m_ptr
), scratchReg
);
340 m_assembler
.movlMemReg(scratchReg
, result
);
342 if (m_assembler
.isImmediate(-imm
.m_value
))
343 m_assembler
.addlImm8r(-imm
.m_value
, result
);
345 m_assembler
.loadConstant(imm
.m_value
, scratchReg3
);
346 m_assembler
.sublRegReg(scratchReg3
, result
);
349 store32(result
, scratchReg
);
350 releaseScratch(result
);
351 releaseScratch(scratchReg
);
354 void add32(TrustedImm32 imm
, AbsoluteAddress address
, RegisterID scratchReg
)
356 RegisterID result
= claimScratch();
358 m_assembler
.loadConstant(reinterpret_cast<uint32_t>(address
.m_ptr
), scratchReg
);
359 m_assembler
.movlMemReg(scratchReg
, result
);
361 if (m_assembler
.isImmediate(imm
.m_value
))
362 m_assembler
.addlImm8r(imm
.m_value
, result
);
364 m_assembler
.loadConstant(imm
.m_value
, scratchReg3
);
365 m_assembler
.addlRegReg(scratchReg3
, result
);
368 store32(result
, scratchReg
);
369 releaseScratch(result
);
372 void add32(TrustedImm32 imm
, AbsoluteAddress address
)
374 RegisterID result
= claimScratch();
375 RegisterID scratchReg
= claimScratch();
377 m_assembler
.loadConstant(reinterpret_cast<uint32_t>(address
.m_ptr
), scratchReg
);
378 m_assembler
.movlMemReg(scratchReg
, result
);
380 if (m_assembler
.isImmediate(imm
.m_value
))
381 m_assembler
.addlImm8r(imm
.m_value
, result
);
383 m_assembler
.loadConstant(imm
.m_value
, scratchReg3
);
384 m_assembler
.addlRegReg(scratchReg3
, result
);
387 store32(result
, scratchReg
);
388 releaseScratch(result
);
389 releaseScratch(scratchReg
);
392 void sub32(TrustedImm32 imm
, RegisterID dest
)
394 if (m_assembler
.isImmediate(-imm
.m_value
)) {
395 m_assembler
.addlImm8r(-imm
.m_value
, dest
);
399 RegisterID scr
= claimScratch();
400 m_assembler
.loadConstant(imm
.m_value
, scr
);
401 m_assembler
.sublRegReg(scr
, dest
);
405 void sub32(Address src
, RegisterID dest
)
407 RegisterID scr
= claimScratch();
409 m_assembler
.sublRegReg(scr
, dest
);
413 void xor32(RegisterID src
, RegisterID dest
)
415 m_assembler
.xorlRegReg(src
, dest
);
418 void xor32(TrustedImm32 imm
, RegisterID srcDest
)
420 if (imm
.m_value
== -1) {
421 m_assembler
.notlReg(srcDest
, srcDest
);
425 if ((srcDest
!= SH4Registers::r0
) || (imm
.m_value
> 255) || (imm
.m_value
< 0)) {
426 RegisterID scr
= claimScratch();
427 m_assembler
.loadConstant((imm
.m_value
), scr
);
428 m_assembler
.xorlRegReg(scr
, srcDest
);
433 m_assembler
.xorlImm8r(imm
.m_value
, srcDest
);
436 void compare32(int imm
, RegisterID dst
, RelationalCondition cond
)
438 if (((cond
== Equal
) || (cond
== NotEqual
)) && (dst
== SH4Registers::r0
) && m_assembler
.isImmediate(imm
)) {
439 m_assembler
.cmpEqImmR0(imm
, dst
);
443 RegisterID scr
= claimScratch();
444 m_assembler
.loadConstant(imm
, scr
);
445 m_assembler
.cmplRegReg(scr
, dst
, SH4Condition(cond
));
449 void compare32(int offset
, RegisterID base
, RegisterID left
, RelationalCondition cond
)
451 RegisterID scr
= claimScratch();
453 m_assembler
.movlMemReg(base
, scr
);
454 m_assembler
.cmplRegReg(scr
, left
, SH4Condition(cond
));
459 if ((offset
< 0) || (offset
>= 64)) {
460 m_assembler
.loadConstant(offset
, scr
);
461 m_assembler
.addlRegReg(base
, scr
);
462 m_assembler
.movlMemReg(scr
, scr
);
463 m_assembler
.cmplRegReg(scr
, left
, SH4Condition(cond
));
468 m_assembler
.movlMemReg(offset
>> 2, base
, scr
);
469 m_assembler
.cmplRegReg(scr
, left
, SH4Condition(cond
));
473 void testImm(int imm
, int offset
, RegisterID base
)
475 RegisterID scr
= claimScratch();
476 RegisterID scr1
= claimScratch();
478 if ((offset
< 0) || (offset
>= 64)) {
479 m_assembler
.loadConstant(offset
, scr
);
480 m_assembler
.addlRegReg(base
, scr
);
481 m_assembler
.movlMemReg(scr
, scr
);
483 m_assembler
.movlMemReg(offset
>> 2, base
, scr
);
485 m_assembler
.movlMemReg(base
, scr
);
486 if (m_assembler
.isImmediate(imm
))
487 m_assembler
.movImm8(imm
, scr1
);
489 m_assembler
.loadConstant(imm
, scr1
);
491 m_assembler
.testlRegReg(scr
, scr1
);
493 releaseScratch(scr1
);
496 void testlImm(int imm
, RegisterID dst
)
498 if ((dst
== SH4Registers::r0
) && (imm
<= 255) && (imm
>= 0)) {
499 m_assembler
.testlImm8r(imm
, dst
);
503 RegisterID scr
= claimScratch();
504 m_assembler
.loadConstant(imm
, scr
);
505 m_assembler
.testlRegReg(scr
, dst
);
509 void compare32(RegisterID right
, int offset
, RegisterID base
, RelationalCondition cond
)
512 RegisterID scr
= claimScratch();
513 m_assembler
.movlMemReg(base
, scr
);
514 m_assembler
.cmplRegReg(right
, scr
, SH4Condition(cond
));
519 if ((offset
< 0) || (offset
>= 64)) {
520 RegisterID scr
= claimScratch();
521 m_assembler
.loadConstant(offset
, scr
);
522 m_assembler
.addlRegReg(base
, scr
);
523 m_assembler
.movlMemReg(scr
, scr
);
524 m_assembler
.cmplRegReg(right
, scr
, SH4Condition(cond
));
529 RegisterID scr
= claimScratch();
530 m_assembler
.movlMemReg(offset
>> 2, base
, scr
);
531 m_assembler
.cmplRegReg(right
, scr
, SH4Condition(cond
));
535 void compare32(int imm
, int offset
, RegisterID base
, RelationalCondition cond
)
538 RegisterID scr
= claimScratch();
539 RegisterID scr1
= claimScratch();
540 m_assembler
.movlMemReg(base
, scr
);
541 m_assembler
.loadConstant(imm
, scr1
);
542 m_assembler
.cmplRegReg(scr1
, scr
, SH4Condition(cond
));
543 releaseScratch(scr1
);
548 if ((offset
< 0) || (offset
>= 64)) {
549 RegisterID scr
= claimScratch();
550 RegisterID scr1
= claimScratch();
551 m_assembler
.loadConstant(offset
, scr
);
552 m_assembler
.addlRegReg(base
, scr
);
553 m_assembler
.movlMemReg(scr
, scr
);
554 m_assembler
.loadConstant(imm
, scr1
);
555 m_assembler
.cmplRegReg(scr1
, scr
, SH4Condition(cond
));
556 releaseScratch(scr1
);
561 RegisterID scr
= claimScratch();
562 RegisterID scr1
= claimScratch();
563 m_assembler
.movlMemReg(offset
>> 2, base
, scr
);
564 m_assembler
.loadConstant(imm
, scr1
);
565 m_assembler
.cmplRegReg(scr1
, scr
, SH4Condition(cond
));
566 releaseScratch(scr1
);
570 // Memory access operation
572 void load32(ImplicitAddress address
, RegisterID dest
)
574 load32(address
.base
, address
.offset
, dest
);
577 void load8(ImplicitAddress address
, RegisterID dest
)
579 load8(address
.base
, address
.offset
, dest
);
582 void load8(BaseIndex address
, RegisterID dest
)
584 RegisterID scr
= claimScratch();
585 move(address
.index
, scr
);
586 lshift32(TrustedImm32(address
.scale
), scr
);
587 add32(address
.base
, scr
);
588 load8(scr
, address
.offset
, dest
);
592 void load32(BaseIndex address
, RegisterID dest
)
594 RegisterID scr
= claimScratch();
595 move(address
.index
, scr
);
596 lshift32(TrustedImm32(address
.scale
), scr
);
597 add32(address
.base
, scr
);
598 load32(scr
, address
.offset
, dest
);
602 void load32(const void* address
, RegisterID dest
)
604 m_assembler
.loadConstant(reinterpret_cast<uint32_t>(const_cast<void*>(address
)), dest
);
605 m_assembler
.movlMemReg(dest
, dest
);
608 void load32(RegisterID base
, int offset
, RegisterID dest
)
611 m_assembler
.movlMemReg(base
, dest
);
615 if ((offset
>= 0) && (offset
< 64)) {
616 m_assembler
.movlMemReg(offset
>> 2, base
, dest
);
620 if ((dest
== SH4Registers::r0
) && (dest
!= base
)) {
621 m_assembler
.loadConstant((offset
), dest
);
622 m_assembler
.movlR0mr(base
, dest
);
628 scr
= claimScratch();
631 m_assembler
.loadConstant((offset
), scr
);
632 m_assembler
.addlRegReg(base
, scr
);
633 m_assembler
.movlMemReg(scr
, dest
);
639 void load8(RegisterID base
, int offset
, RegisterID dest
)
642 m_assembler
.movbMemReg(base
, dest
);
643 m_assembler
.extub(dest
, dest
);
647 if ((offset
> 0) && (offset
< 64) && (dest
== SH4Registers::r0
)) {
648 m_assembler
.movbMemReg(offset
, base
, dest
);
649 m_assembler
.extub(dest
, dest
);
654 m_assembler
.loadConstant((offset
), dest
);
655 m_assembler
.addlRegReg(base
, dest
);
656 m_assembler
.movbMemReg(dest
, dest
);
657 m_assembler
.extub(dest
, dest
);
661 RegisterID scr
= claimScratch();
662 m_assembler
.loadConstant((offset
), scr
);
663 m_assembler
.addlRegReg(base
, scr
);
664 m_assembler
.movbMemReg(scr
, dest
);
665 m_assembler
.extub(dest
, dest
);
669 void load32(RegisterID r0
, RegisterID src
, RegisterID dst
)
671 ASSERT(r0
== SH4Registers::r0
);
672 m_assembler
.movlR0mr(src
, dst
);
675 void load32(RegisterID src
, RegisterID dst
)
677 m_assembler
.movlMemReg(src
, dst
);
680 void load16(ImplicitAddress address
, RegisterID dest
)
682 if (!address
.offset
) {
683 m_assembler
.movwMemReg(address
.base
, dest
);
688 if ((address
.offset
> 0) && (address
.offset
< 64) && (dest
== SH4Registers::r0
)) {
689 m_assembler
.movwMemReg(address
.offset
, address
.base
, dest
);
694 if (address
.base
!= dest
) {
695 m_assembler
.loadConstant((address
.offset
), dest
);
696 m_assembler
.addlRegReg(address
.base
, dest
);
697 m_assembler
.movwMemReg(dest
, dest
);
702 RegisterID scr
= claimScratch();
703 m_assembler
.loadConstant((address
.offset
), scr
);
704 m_assembler
.addlRegReg(address
.base
, scr
);
705 m_assembler
.movwMemReg(scr
, dest
);
710 void load16Unaligned(BaseIndex address
, RegisterID dest
)
713 RegisterID scr
= claimScratch();
714 RegisterID scr1
= claimScratch();
716 move(address
.index
, scr
);
717 lshift32(TrustedImm32(address
.scale
), scr
);
720 add32(TrustedImm32(address
.offset
), scr
);
722 add32(address
.base
, scr
);
724 add32(TrustedImm32(1), scr
);
726 m_assembler
.shllImm8r(8, dest
);
730 releaseScratch(scr1
);
733 void load16(RegisterID src
, RegisterID dest
)
735 m_assembler
.movwMemReg(src
, dest
);
739 void load16(RegisterID r0
, RegisterID src
, RegisterID dest
)
741 ASSERT(r0
== SH4Registers::r0
);
742 m_assembler
.movwR0mr(src
, dest
);
746 void load16(BaseIndex address
, RegisterID dest
)
748 RegisterID scr
= claimScratch();
750 move(address
.index
, scr
);
751 lshift32(TrustedImm32(address
.scale
), scr
);
754 add32(TrustedImm32(address
.offset
), scr
);
755 if (address
.base
== SH4Registers::r0
)
756 load16(address
.base
, scr
, dest
);
758 add32(address
.base
, scr
);
765 void store32(RegisterID src
, ImplicitAddress address
)
767 RegisterID scr
= claimScratch();
768 store32(src
, address
.offset
, address
.base
, scr
);
772 void store32(RegisterID src
, int offset
, RegisterID base
, RegisterID scr
)
775 m_assembler
.movlRegMem(src
, base
);
779 if ((offset
>=0) && (offset
< 64)) {
780 m_assembler
.movlRegMem(src
, offset
>> 2, base
);
784 m_assembler
.loadConstant((offset
), scr
);
785 if (scr
== SH4Registers::r0
) {
786 m_assembler
.movlRegMemr0(src
, base
);
790 m_assembler
.addlRegReg(base
, scr
);
791 m_assembler
.movlRegMem(src
, scr
);
794 void store32(RegisterID src
, RegisterID offset
, RegisterID base
)
796 ASSERT(offset
== SH4Registers::r0
);
797 m_assembler
.movlRegMemr0(src
, base
);
800 void store32(RegisterID src
, RegisterID dst
)
802 m_assembler
.movlRegMem(src
, dst
);
805 void store32(TrustedImm32 imm
, ImplicitAddress address
)
807 RegisterID scr
= claimScratch();
808 RegisterID scr1
= claimScratch();
809 m_assembler
.loadConstant((imm
.m_value
), scr
);
810 store32(scr
, address
.offset
, address
.base
, scr1
);
812 releaseScratch(scr1
);
815 void store32(RegisterID src
, BaseIndex address
)
817 RegisterID scr
= claimScratch();
819 move(address
.index
, scr
);
820 lshift32(TrustedImm32(address
.scale
), scr
);
821 add32(address
.base
, scr
);
822 store32(src
, Address(scr
, address
.offset
));
827 void store32(TrustedImm32 imm
, void* address
)
829 RegisterID scr
= claimScratch();
830 RegisterID scr1
= claimScratch();
831 m_assembler
.loadConstant((imm
.m_value
), scr
);
832 m_assembler
.loadConstant(reinterpret_cast<uint32_t>(address
), scr1
);
833 m_assembler
.movlRegMem(scr
, scr1
);
835 releaseScratch(scr1
);
838 void store32(RegisterID src
, void* address
)
840 RegisterID scr
= claimScratch();
841 m_assembler
.loadConstant(reinterpret_cast<uint32_t>(address
), scr
);
842 m_assembler
.movlRegMem(src
, scr
);
846 DataLabel32
load32WithAddressOffsetPatch(Address address
, RegisterID dest
)
848 RegisterID scr
= claimScratch();
849 DataLabel32
label(this);
850 m_assembler
.loadConstantUnReusable(address
.offset
, scr
);
851 m_assembler
.addlRegReg(address
.base
, scr
);
852 m_assembler
.movlMemReg(scr
, dest
);
857 DataLabel32
store32WithAddressOffsetPatch(RegisterID src
, Address address
)
859 RegisterID scr
= claimScratch();
860 DataLabel32
label(this);
861 m_assembler
.loadConstantUnReusable(address
.offset
, scr
);
862 m_assembler
.addlRegReg(address
.base
, scr
);
863 m_assembler
.movlRegMem(src
, scr
);
868 DataLabelCompact
load32WithCompactAddressOffsetPatch(Address address
, RegisterID dest
)
870 DataLabelCompact
dataLabel(this);
871 ASSERT(address
.offset
<= MaximumCompactPtrAlignedAddressOffset
);
872 ASSERT(address
.offset
>= 0);
873 m_assembler
.movlMemRegCompact(address
.offset
>> 2, address
.base
, dest
);
877 // Floating-point operations
879 static bool supportsFloatingPoint() { return true; }
880 static bool supportsFloatingPointTruncate() { return true; }
881 static bool supportsFloatingPointSqrt() { return true; }
882 static bool supportsFloatingPointAbs() { return false; }
884 void loadDouble(ImplicitAddress address
, FPRegisterID dest
)
886 RegisterID scr
= claimScratch();
888 m_assembler
.loadConstant(address
.offset
, scr
);
889 if (address
.base
== SH4Registers::r0
) {
890 m_assembler
.fmovsReadr0r(scr
, (FPRegisterID
)(dest
+ 1));
891 m_assembler
.addlImm8r(4, scr
);
892 m_assembler
.fmovsReadr0r(scr
, dest
);
897 m_assembler
.addlRegReg(address
.base
, scr
);
898 m_assembler
.fmovsReadrminc(scr
, (FPRegisterID
)(dest
+ 1));
899 m_assembler
.fmovsReadrm(scr
, dest
);
903 void loadDouble(const void* address
, FPRegisterID dest
)
905 RegisterID scr
= claimScratch();
906 m_assembler
.loadConstant(reinterpret_cast<uint32_t>(address
), scr
);
907 m_assembler
.fmovsReadrminc(scr
, (FPRegisterID
)(dest
+ 1));
908 m_assembler
.fmovsReadrm(scr
, dest
);
912 void storeDouble(FPRegisterID src
, ImplicitAddress address
)
914 RegisterID scr
= claimScratch();
915 m_assembler
.loadConstant(address
.offset
, scr
);
916 m_assembler
.addlRegReg(address
.base
, scr
);
917 m_assembler
.fmovsWriterm((FPRegisterID
)(src
+ 1), scr
);
918 m_assembler
.addlImm8r(4, scr
);
919 m_assembler
.fmovsWriterm(src
, scr
);
923 void addDouble(FPRegisterID src
, FPRegisterID dest
)
925 m_assembler
.daddRegReg(src
, dest
);
928 void addDouble(Address address
, FPRegisterID dest
)
930 loadDouble(address
, fscratch
);
931 addDouble(fscratch
, dest
);
934 void subDouble(FPRegisterID src
, FPRegisterID dest
)
936 m_assembler
.dsubRegReg(src
, dest
);
939 void subDouble(Address address
, FPRegisterID dest
)
941 loadDouble(address
, fscratch
);
942 subDouble(fscratch
, dest
);
945 void mulDouble(FPRegisterID src
, FPRegisterID dest
)
947 m_assembler
.dmulRegReg(src
, dest
);
950 void mulDouble(Address address
, FPRegisterID dest
)
952 loadDouble(address
, fscratch
);
953 mulDouble(fscratch
, dest
);
956 void divDouble(FPRegisterID src
, FPRegisterID dest
)
958 m_assembler
.ddivRegReg(src
, dest
);
961 void convertInt32ToDouble(RegisterID src
, FPRegisterID dest
)
963 m_assembler
.ldsrmfpul(src
);
964 m_assembler
.floatfpulDreg(dest
);
967 void convertInt32ToDouble(AbsoluteAddress src
, FPRegisterID dest
)
969 RegisterID scr
= claimScratch();
970 m_assembler
.loadConstant(reinterpret_cast<uint32_t>(src
.m_ptr
), scr
);
971 convertInt32ToDouble(scr
, dest
);
975 void convertInt32ToDouble(Address src
, FPRegisterID dest
)
977 RegisterID scr
= claimScratch();
979 convertInt32ToDouble(scr
, dest
);
983 void load32WithUnalignedHalfWords(BaseIndex address
, RegisterID dest
)
985 RegisterID scr
= claimScratch();
986 RegisterID scr1
= claimScratch();
990 if (dest
!= SH4Registers::r0
)
991 move(SH4Registers::r0
, scr1
);
993 move(address
.index
, scr
);
994 lshift32(TrustedImm32(address
.scale
), scr
);
995 add32(address
.base
, scr
);
998 add32(TrustedImm32(address
.offset
), scr
);
1000 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 68, sizeof(uint32_t));
1001 move(scr
, SH4Registers::r0
);
1002 m_assembler
.andlImm8r(0x3, SH4Registers::r0
);
1003 m_assembler
.cmpEqImmR0(0x0, SH4Registers::r0
);
1004 m_jump
= Jump(m_assembler
.jne(), SH4Assembler::JumpNear
);
1005 if (dest
!= SH4Registers::r0
)
1006 move(scr1
, SH4Registers::r0
);
1009 end
.append(Jump(m_assembler
.bra(), SH4Assembler::JumpNear
));
1012 m_assembler
.andlImm8r(0x1, SH4Registers::r0
);
1013 m_assembler
.cmpEqImmR0(0x0, SH4Registers::r0
);
1015 if (dest
!= SH4Registers::r0
)
1016 move(scr1
, SH4Registers::r0
);
1018 m_jump
= Jump(m_assembler
.jne(), SH4Assembler::JumpNear
);
1020 add32(TrustedImm32(2), scr
);
1022 m_assembler
.shllImm8r(16, dest
);
1024 end
.append(Jump(m_assembler
.bra(), SH4Assembler::JumpNear
));
1028 add32(TrustedImm32(1), scr
);
1030 m_assembler
.shllImm8r(8, dest
);
1032 add32(TrustedImm32(2), scr
);
1034 m_assembler
.shllImm8r(8, dest
);
1035 m_assembler
.shllImm8r(16, dest
);
1039 releaseScratch(scr
);
1040 releaseScratch(scr1
);
1043 Jump
branch32WithUnalignedHalfWords(RelationalCondition cond
, BaseIndex left
, TrustedImm32 right
)
1045 RegisterID scr
= scratchReg3
;
1046 load32WithUnalignedHalfWords(left
, scr
);
1047 if (((cond
== Equal
) || (cond
== NotEqual
)) && !right
.m_value
)
1048 m_assembler
.testlRegReg(scr
, scr
);
1050 compare32(right
.m_value
, scr
, cond
);
1052 if (cond
== NotEqual
)
1053 return branchFalse();
1054 return branchTrue();
1057 Jump
branchDoubleNonZero(FPRegisterID reg
, FPRegisterID scratch
)
1059 m_assembler
.movImm8(0, scratchReg3
);
1060 convertInt32ToDouble(scratchReg3
, scratch
);
1061 return branchDouble(DoubleNotEqual
, reg
, scratch
);
1064 Jump
branchDoubleZeroOrNaN(FPRegisterID reg
, FPRegisterID scratch
)
1066 m_assembler
.movImm8(0, scratchReg3
);
1067 convertInt32ToDouble(scratchReg3
, scratch
);
1068 return branchDouble(DoubleEqualOrUnordered
, reg
, scratch
);
1071 Jump
branchDouble(DoubleCondition cond
, FPRegisterID left
, FPRegisterID right
)
1073 if (cond
== DoubleEqual
) {
1074 m_assembler
.dcmppeq(right
, left
);
1075 return branchTrue();
1078 if (cond
== DoubleNotEqual
) {
1079 RegisterID scr
= claimScratch();
1081 m_assembler
.loadConstant(0x7fbfffff, scratchReg3
);
1082 m_assembler
.dcnvds(right
);
1083 m_assembler
.stsfpulReg(scr
);
1084 m_assembler
.cmplRegReg(scratchReg3
, scr
, SH4Condition(Equal
));
1085 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 22, sizeof(uint32_t));
1086 end
.append(Jump(m_assembler
.je(), SH4Assembler::JumpNear
));
1087 m_assembler
.dcnvds(left
);
1088 m_assembler
.stsfpulReg(scr
);
1089 m_assembler
.cmplRegReg(scratchReg3
, scr
, SH4Condition(Equal
));
1090 end
.append(Jump(m_assembler
.je(), SH4Assembler::JumpNear
));
1091 m_assembler
.dcmppeq(right
, left
);
1092 releaseScratch(scr
);
1093 Jump m_jump
= branchFalse();
1098 if (cond
== DoubleGreaterThan
) {
1099 m_assembler
.dcmppgt(right
, left
);
1100 return branchTrue();
1103 if (cond
== DoubleGreaterThanOrEqual
) {
1104 m_assembler
.dcmppgt(left
, right
);
1105 return branchFalse();
1108 if (cond
== DoubleLessThan
) {
1109 m_assembler
.dcmppgt(left
, right
);
1110 return branchTrue();
1113 if (cond
== DoubleLessThanOrEqual
) {
1114 m_assembler
.dcmppgt(right
, left
);
1115 return branchFalse();
1118 if (cond
== DoubleEqualOrUnordered
) {
1119 RegisterID scr
= claimScratch();
1121 m_assembler
.loadConstant(0x7fbfffff, scratchReg3
);
1122 m_assembler
.dcnvds(right
);
1123 m_assembler
.stsfpulReg(scr
);
1124 m_assembler
.cmplRegReg(scratchReg3
, scr
, SH4Condition(Equal
));
1125 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 22, sizeof(uint32_t));
1126 end
.append(Jump(m_assembler
.je(), SH4Assembler::JumpNear
));
1127 m_assembler
.dcnvds(left
);
1128 m_assembler
.stsfpulReg(scr
);
1129 m_assembler
.cmplRegReg(scratchReg3
, scr
, SH4Condition(Equal
));
1130 end
.append(Jump(m_assembler
.je(), SH4Assembler::JumpNear
));
1131 m_assembler
.dcmppeq(left
, right
);
1132 Jump m_jump
= Jump(m_assembler
.je());
1134 m_assembler
.extraInstrForBranch(scr
);
1135 releaseScratch(scr
);
1139 if (cond
== DoubleGreaterThanOrUnordered
) {
1140 RegisterID scr
= claimScratch();
1142 m_assembler
.loadConstant(0x7fbfffff, scratchReg3
);
1143 m_assembler
.dcnvds(right
);
1144 m_assembler
.stsfpulReg(scr
);
1145 m_assembler
.cmplRegReg(scratchReg3
, scr
, SH4Condition(Equal
));
1146 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 22, sizeof(uint32_t));
1147 end
.append(Jump(m_assembler
.je(), SH4Assembler::JumpNear
));
1148 m_assembler
.dcnvds(left
);
1149 m_assembler
.stsfpulReg(scr
);
1150 m_assembler
.cmplRegReg(scratchReg3
, scr
, SH4Condition(Equal
));
1151 end
.append(Jump(m_assembler
.je(), SH4Assembler::JumpNear
));
1152 m_assembler
.dcmppgt(right
, left
);
1153 Jump m_jump
= Jump(m_assembler
.je());
1155 m_assembler
.extraInstrForBranch(scr
);
1156 releaseScratch(scr
);
1160 if (cond
== DoubleGreaterThanOrEqualOrUnordered
) {
1161 RegisterID scr
= claimScratch();
1163 m_assembler
.loadConstant(0x7fbfffff, scratchReg3
);
1164 m_assembler
.dcnvds(right
);
1165 m_assembler
.stsfpulReg(scr
);
1166 m_assembler
.cmplRegReg(scratchReg3
, scr
, SH4Condition(Equal
));
1167 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 22, sizeof(uint32_t));
1168 end
.append(Jump(m_assembler
.je(), SH4Assembler::JumpNear
));
1169 m_assembler
.dcnvds(left
);
1170 m_assembler
.stsfpulReg(scr
);
1171 m_assembler
.cmplRegReg(scratchReg3
, scr
, SH4Condition(Equal
));
1172 end
.append(Jump(m_assembler
.je(), SH4Assembler::JumpNear
));
1173 m_assembler
.dcmppgt(left
, right
);
1174 Jump m_jump
= Jump(m_assembler
.jne());
1176 m_assembler
.extraInstrForBranch(scr
);
1177 releaseScratch(scr
);
1181 if (cond
== DoubleLessThanOrUnordered
) {
1182 RegisterID scr
= claimScratch();
1184 m_assembler
.loadConstant(0x7fbfffff, scratchReg3
);
1185 m_assembler
.dcnvds(right
);
1186 m_assembler
.stsfpulReg(scr
);
1187 m_assembler
.cmplRegReg(scratchReg3
, scr
, SH4Condition(Equal
));
1188 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 22, sizeof(uint32_t));
1189 end
.append(Jump(m_assembler
.je(), SH4Assembler::JumpNear
));
1190 m_assembler
.dcnvds(left
);
1191 m_assembler
.stsfpulReg(scr
);
1192 m_assembler
.cmplRegReg(scratchReg3
, scr
, SH4Condition(Equal
));
1193 end
.append(Jump(m_assembler
.je(), SH4Assembler::JumpNear
));
1194 m_assembler
.dcmppgt(left
, right
);
1195 Jump m_jump
= Jump(m_assembler
.je());
1197 m_assembler
.extraInstrForBranch(scr
);
1198 releaseScratch(scr
);
1202 if (cond
== DoubleLessThanOrEqualOrUnordered
) {
1203 RegisterID scr
= claimScratch();
1205 m_assembler
.loadConstant(0x7fbfffff, scratchReg3
);
1206 m_assembler
.dcnvds(right
);
1207 m_assembler
.stsfpulReg(scr
);
1208 m_assembler
.cmplRegReg(scratchReg3
, scr
, SH4Condition(Equal
));
1209 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 22, sizeof(uint32_t));
1210 end
.append(Jump(m_assembler
.je(), SH4Assembler::JumpNear
));
1211 m_assembler
.dcnvds(left
);
1212 m_assembler
.stsfpulReg(scr
);
1213 m_assembler
.cmplRegReg(scratchReg3
, scr
, SH4Condition(Equal
));
1214 end
.append(Jump(m_assembler
.je(), SH4Assembler::JumpNear
));
1215 m_assembler
.dcmppgt(right
, left
);
1216 Jump m_jump
= Jump(m_assembler
.jne());
1218 m_assembler
.extraInstrForBranch(scr
);
1219 releaseScratch(scr
);
1223 ASSERT(cond
== DoubleNotEqualOrUnordered
);
1224 RegisterID scr
= claimScratch();
1226 m_assembler
.loadConstant(0x7fbfffff, scratchReg3
);
1227 m_assembler
.dcnvds(right
);
1228 m_assembler
.stsfpulReg(scr
);
1229 m_assembler
.cmplRegReg(scratchReg3
, scr
, SH4Condition(Equal
));
1230 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 22, sizeof(uint32_t));
1231 end
.append(Jump(m_assembler
.je(), SH4Assembler::JumpNear
));
1232 m_assembler
.dcnvds(left
);
1233 m_assembler
.stsfpulReg(scr
);
1234 m_assembler
.cmplRegReg(scratchReg3
, scr
, SH4Condition(Equal
));
1235 end
.append(Jump(m_assembler
.je(), SH4Assembler::JumpNear
));
1236 m_assembler
.dcmppeq(right
, left
);
1237 Jump m_jump
= Jump(m_assembler
.jne());
1239 m_assembler
.extraInstrForBranch(scr
);
1240 releaseScratch(scr
);
1246 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 6, sizeof(uint32_t));
1247 Jump m_jump
= Jump(m_assembler
.je());
1248 m_assembler
.extraInstrForBranch(scratchReg3
);
1254 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 6, sizeof(uint32_t));
1255 Jump m_jump
= Jump(m_assembler
.jne());
1256 m_assembler
.extraInstrForBranch(scratchReg3
);
1260 Jump
branch32(RelationalCondition cond
, BaseIndex left
, TrustedImm32 right
)
1262 RegisterID scr
= claimScratch();
1263 move(left
.index
, scr
);
1264 lshift32(TrustedImm32(left
.scale
), scr
);
1265 add32(left
.base
, scr
);
1266 load32(scr
, left
.offset
, scr
);
1267 compare32(right
.m_value
, scr
, cond
);
1268 releaseScratch(scr
);
1270 if (cond
== NotEqual
)
1271 return branchFalse();
1272 return branchTrue();
1275 void sqrtDouble(FPRegisterID src
, FPRegisterID dest
)
1278 m_assembler
.dmovRegReg(src
, dest
);
1279 m_assembler
.dsqrt(dest
);
1282 void absDouble(FPRegisterID
, FPRegisterID
)
1284 ASSERT_NOT_REACHED();
1287 Jump
branchTest8(ResultCondition cond
, Address address
, TrustedImm32 mask
= TrustedImm32(-1))
1289 RegisterID addressTempRegister
= claimScratch();
1290 load8(address
, addressTempRegister
);
1291 Jump jmp
= branchTest32(cond
, addressTempRegister
, mask
);
1292 releaseScratch(addressTempRegister
);
1296 void signExtend32ToPtr(RegisterID src
, RegisterID dest
)
1302 Jump
branch8(RelationalCondition cond
, Address left
, TrustedImm32 right
)
1304 RegisterID addressTempRegister
= claimScratch();
1305 load8(left
, addressTempRegister
);
1306 Jump jmp
= branch32(cond
, addressTempRegister
, right
);
1307 releaseScratch(addressTempRegister
);
1311 void compare8(RelationalCondition cond
, Address left
, TrustedImm32 right
, RegisterID dest
)
1313 RegisterID addressTempRegister
= claimScratch();
1314 load8(left
, addressTempRegister
);
1315 compare32(cond
, addressTempRegister
, right
, dest
);
1316 releaseScratch(addressTempRegister
);
1319 Jump
branchTruncateDoubleToInt32(FPRegisterID src
, RegisterID dest
)
1321 m_assembler
.ftrcdrmfpul(src
);
1322 m_assembler
.stsfpulReg(dest
);
1323 m_assembler
.loadConstant(0x7fffffff, scratchReg3
);
1324 m_assembler
.cmplRegReg(dest
, scratchReg3
, SH4Condition(Equal
));
1325 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 14, sizeof(uint32_t));
1326 m_assembler
.branch(BT_OPCODE
, 2);
1327 m_assembler
.addlImm8r(1, scratchReg3
);
1328 m_assembler
.cmplRegReg(dest
, scratchReg3
, SH4Condition(Equal
));
1329 return branchTrue();
// Stack manipulation operations
1334 void pop(RegisterID dest
)
1336 m_assembler
.popReg(dest
);
1339 void push(RegisterID src
)
1341 m_assembler
.pushReg(src
);
1344 void push(Address address
)
1346 if (!address
.offset
) {
1351 if ((address
.offset
< 0) || (address
.offset
>= 64)) {
1352 RegisterID scr
= claimScratch();
1353 m_assembler
.loadConstant(address
.offset
, scr
);
1354 m_assembler
.addlRegReg(address
.base
, scr
);
1355 m_assembler
.movlMemReg(scr
, SH4Registers::sp
);
1356 m_assembler
.addlImm8r(-4, SH4Registers::sp
);
1357 releaseScratch(scr
);
1361 m_assembler
.movlMemReg(address
.offset
>> 2, address
.base
, SH4Registers::sp
);
1362 m_assembler
.addlImm8r(-4, SH4Registers::sp
);
1365 void push(TrustedImm32 imm
)
1367 RegisterID scr
= claimScratch();
1368 m_assembler
.loadConstant(imm
.m_value
, scr
);
1370 releaseScratch(scr
);
// Register move operations
1375 void move(TrustedImm32 imm
, RegisterID dest
)
1377 m_assembler
.loadConstant(imm
.m_value
, dest
);
1380 DataLabelPtr
moveWithPatch(TrustedImmPtr initialValue
, RegisterID dest
)
1382 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
, sizeof(uint32_t));
1383 DataLabelPtr
dataLabel(this);
1384 m_assembler
.loadConstantUnReusable(reinterpret_cast<uint32_t>(initialValue
.m_value
), dest
);
1388 void move(RegisterID src
, RegisterID dest
)
1391 m_assembler
.movlRegReg(src
, dest
);
1394 void move(TrustedImmPtr imm
, RegisterID dest
)
1396 m_assembler
.loadConstant(imm
.asIntptr(), dest
);
1399 void extuw(RegisterID src
, RegisterID dst
)
1401 m_assembler
.extuw(src
, dst
);
1404 void compare32(RelationalCondition cond
, RegisterID left
, RegisterID right
, RegisterID dest
)
1406 m_assembler
.cmplRegReg(right
, left
, SH4Condition(cond
));
1407 if (cond
!= NotEqual
) {
1408 m_assembler
.movt(dest
);
1412 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 4);
1413 m_assembler
.movImm8(0, dest
);
1414 m_assembler
.branch(BT_OPCODE
, 0);
1415 m_assembler
.movImm8(1, dest
);
1418 void compare32(RelationalCondition cond
, RegisterID left
, TrustedImm32 right
, RegisterID dest
)
1422 compare32(cond
, left
, dest
, dest
);
1426 RegisterID scr
= claimScratch();
1428 compare32(cond
, left
, scr
, dest
);
1429 releaseScratch(scr
);
1432 void test8(ResultCondition cond
, Address address
, TrustedImm32 mask
, RegisterID dest
)
1434 ASSERT((cond
== Zero
) || (cond
== NonZero
));
1436 load8(address
, dest
);
1437 if (mask
.m_value
== -1)
1438 compare32(0, dest
, static_cast<RelationalCondition
>(cond
));
1440 testlImm(mask
.m_value
, dest
);
1441 if (cond
!= NonZero
) {
1442 m_assembler
.movt(dest
);
1446 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 4);
1447 m_assembler
.movImm8(0, dest
);
1448 m_assembler
.branch(BT_OPCODE
, 0);
1449 m_assembler
.movImm8(1, dest
);
1452 void loadPtrLinkReg(ImplicitAddress address
)
1454 RegisterID scr
= claimScratch();
1455 load32(address
, scr
);
1456 m_assembler
.ldspr(scr
);
1457 releaseScratch(scr
);
1460 Jump
branch32(RelationalCondition cond
, RegisterID left
, RegisterID right
)
1462 m_assembler
.cmplRegReg(right
, left
, SH4Condition(cond
));
1463 /* BT label => BF off
1468 if (cond
== NotEqual
)
1469 return branchFalse();
1470 return branchTrue();
1473 Jump
branch32(RelationalCondition cond
, RegisterID left
, TrustedImm32 right
)
1475 if (((cond
== Equal
) || (cond
== NotEqual
)) && !right
.m_value
)
1476 m_assembler
.testlRegReg(left
, left
);
1478 compare32(right
.m_value
, left
, cond
);
1480 if (cond
== NotEqual
)
1481 return branchFalse();
1482 return branchTrue();
1485 Jump
branch32(RelationalCondition cond
, RegisterID left
, Address right
)
1487 compare32(right
.offset
, right
.base
, left
, cond
);
1488 if (cond
== NotEqual
)
1489 return branchFalse();
1490 return branchTrue();
1493 Jump
branch32(RelationalCondition cond
, Address left
, RegisterID right
)
1495 compare32(right
, left
.offset
, left
.base
, cond
);
1496 if (cond
== NotEqual
)
1497 return branchFalse();
1498 return branchTrue();
1501 Jump
branch32(RelationalCondition cond
, Address left
, TrustedImm32 right
)
1503 compare32(right
.m_value
, left
.offset
, left
.base
, cond
);
1504 if (cond
== NotEqual
)
1505 return branchFalse();
1506 return branchTrue();
1509 Jump
branch32(RelationalCondition cond
, AbsoluteAddress left
, RegisterID right
)
1511 RegisterID scr
= claimScratch();
1513 move(TrustedImm32(reinterpret_cast<uint32_t>(left
.m_ptr
)), scr
);
1514 m_assembler
.cmplRegReg(right
, scr
, SH4Condition(cond
));
1515 releaseScratch(scr
);
1517 if (cond
== NotEqual
)
1518 return branchFalse();
1519 return branchTrue();
1522 Jump
branch32(RelationalCondition cond
, AbsoluteAddress left
, TrustedImm32 right
)
1524 RegisterID addressTempRegister
= claimScratch();
1526 m_assembler
.loadConstant(reinterpret_cast<uint32_t>(left
.m_ptr
), addressTempRegister
);
1527 m_assembler
.movlMemReg(addressTempRegister
, addressTempRegister
);
1528 compare32(right
.m_value
, addressTempRegister
, cond
);
1529 releaseScratch(addressTempRegister
);
1531 if (cond
== NotEqual
)
1532 return branchFalse();
1533 return branchTrue();
1536 Jump
branch8(RelationalCondition cond
, BaseIndex left
, TrustedImm32 right
)
1538 ASSERT(!(right
.m_value
& 0xFFFFFF00));
1539 RegisterID scr
= claimScratch();
1541 move(left
.index
, scr
);
1542 lshift32(TrustedImm32(left
.scale
), scr
);
1545 add32(TrustedImm32(left
.offset
), scr
);
1546 add32(left
.base
, scr
);
1548 RegisterID scr1
= claimScratch();
1549 m_assembler
.loadConstant(right
.m_value
, scr1
);
1550 releaseScratch(scr
);
1551 releaseScratch(scr1
);
1553 return branch32(cond
, scr
, scr1
);
1556 Jump
branchTest32(ResultCondition cond
, RegisterID reg
, RegisterID mask
)
1558 ASSERT((cond
== Zero
) || (cond
== NonZero
));
1560 m_assembler
.testlRegReg(reg
, mask
);
1562 if (cond
== NotEqual
)
1563 return branchFalse();
1564 return branchTrue();
1567 Jump
branchTest32(ResultCondition cond
, RegisterID reg
, TrustedImm32 mask
= TrustedImm32(-1))
1569 ASSERT((cond
== Zero
) || (cond
== NonZero
));
1571 if (mask
.m_value
== -1)
1572 m_assembler
.testlRegReg(reg
, reg
);
1574 testlImm(mask
.m_value
, reg
);
1576 if (cond
== NotEqual
)
1577 return branchFalse();
1578 return branchTrue();
1581 Jump
branchTest32(ResultCondition cond
, Address address
, TrustedImm32 mask
= TrustedImm32(-1))
1583 ASSERT((cond
== Zero
) || (cond
== NonZero
));
1585 if (mask
.m_value
== -1)
1586 compare32(0, address
.offset
, address
.base
, static_cast<RelationalCondition
>(cond
));
1588 testImm(mask
.m_value
, address
.offset
, address
.base
);
1590 if (cond
== NotEqual
)
1591 return branchFalse();
1592 return branchTrue();
1595 Jump
branchTest32(ResultCondition cond
, BaseIndex address
, TrustedImm32 mask
= TrustedImm32(-1))
1597 RegisterID scr
= claimScratch();
1599 move(address
.index
, scr
);
1600 lshift32(TrustedImm32(address
.scale
), scr
);
1601 add32(address
.base
, scr
);
1602 load32(scr
, address
.offset
, scr
);
1604 if (mask
.m_value
== -1)
1605 m_assembler
.testlRegReg(scr
, scr
);
1607 testlImm(mask
.m_value
, scr
);
1609 releaseScratch(scr
);
1611 if (cond
== NotEqual
)
1612 return branchFalse();
1613 return branchTrue();
1618 return Jump(m_assembler
.jmp());
1621 void jump(RegisterID target
)
1623 m_assembler
.jmpReg(target
);
1626 void jump(Address address
)
1628 RegisterID scr
= claimScratch();
1630 if ((address
.offset
< 0) || (address
.offset
>= 64)) {
1631 m_assembler
.loadConstant(address
.offset
, scr
);
1632 m_assembler
.addlRegReg(address
.base
, scr
);
1633 m_assembler
.movlMemReg(scr
, scr
);
1634 } else if (address
.offset
)
1635 m_assembler
.movlMemReg(address
.offset
>> 2, address
.base
, scr
);
1637 m_assembler
.movlMemReg(address
.base
, scr
);
1638 m_assembler
.jmpReg(scr
);
1640 releaseScratch(scr
);
// Arithmetic control flow operations
1645 Jump
branchAdd32(ResultCondition cond
, RegisterID src
, RegisterID dest
)
1647 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
1649 if (cond
== Overflow
) {
1650 m_assembler
.addvlRegReg(src
, dest
);
1651 return branchTrue();
1654 if (cond
== Signed
) {
1655 m_assembler
.addlRegReg(src
, dest
);
1656 // Check if dest is negative
1657 m_assembler
.cmppz(dest
);
1658 return branchFalse();
1661 m_assembler
.addlRegReg(src
, dest
);
1662 compare32(0, dest
, Equal
);
1664 if (cond
== NotEqual
)
1665 return branchFalse();
1666 return branchTrue();
1669 Jump
branchAdd32(ResultCondition cond
, TrustedImm32 imm
, RegisterID dest
)
1671 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
1673 move(imm
, scratchReg3
);
1674 return branchAdd32(cond
, scratchReg3
, dest
);
1677 Jump
branchAdd32(ResultCondition cond
, RegisterID src
, TrustedImm32 imm
, RegisterID dest
)
1679 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
1684 if (cond
== Overflow
) {
1685 move(imm
, scratchReg3
);
1686 m_assembler
.addvlRegReg(scratchReg3
, dest
);
1687 return branchTrue();
1692 if (cond
== Signed
) {
1693 m_assembler
.cmppz(dest
);
1694 return branchFalse();
1697 compare32(0, dest
, Equal
);
1699 if (cond
== NotEqual
)
1700 return branchFalse();
1701 return branchTrue();
1704 Jump
branchMul32(ResultCondition cond
, RegisterID src
, RegisterID dest
)
1706 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
1708 if (cond
== Overflow
) {
1709 RegisterID scr1
= claimScratch();
1710 RegisterID scr
= claimScratch();
1711 m_assembler
.dmullRegReg(src
, dest
);
1712 m_assembler
.stsmacl(dest
);
1713 m_assembler
.movImm8(-31, scr
);
1714 m_assembler
.movlRegReg(dest
, scr1
);
1715 m_assembler
.shaRegReg(scr1
, scr
);
1716 m_assembler
.stsmach(scr
);
1717 m_assembler
.cmplRegReg(scr
, scr1
, SH4Condition(Equal
));
1718 releaseScratch(scr1
);
1719 releaseScratch(scr
);
1720 return branchFalse();
1723 m_assembler
.imullRegReg(src
, dest
);
1724 m_assembler
.stsmacl(dest
);
1725 if (cond
== Signed
) {
1726 // Check if dest is negative
1727 m_assembler
.cmppz(dest
);
1728 return branchFalse();
1731 compare32(0, dest
, static_cast<RelationalCondition
>(cond
));
1733 if (cond
== NotEqual
)
1734 return branchFalse();
1735 return branchTrue();
1738 Jump
branchMul32(ResultCondition cond
, TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
1740 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
1742 move(imm
, scratchReg3
);
1746 return branchMul32(cond
, scratchReg3
, dest
);
1749 Jump
branchSub32(ResultCondition cond
, RegisterID src
, RegisterID dest
)
1751 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
1753 if (cond
== Overflow
) {
1754 m_assembler
.subvlRegReg(src
, dest
);
1755 return branchTrue();
1758 if (cond
== Signed
) {
1759 // Check if dest is negative
1760 m_assembler
.sublRegReg(src
, dest
);
1761 compare32(0, dest
, LessThan
);
1762 return branchTrue();
1766 compare32(0, dest
, static_cast<RelationalCondition
>(cond
));
1768 if (cond
== NotEqual
)
1769 return branchFalse();
1770 return branchTrue();
1773 Jump
branchSub32(ResultCondition cond
, TrustedImm32 imm
, RegisterID dest
)
1775 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
1777 move(imm
, scratchReg3
);
1778 return branchSub32(cond
, scratchReg3
, dest
);
1781 Jump
branchSub32(ResultCondition cond
, RegisterID src
, TrustedImm32 imm
, RegisterID dest
)
1783 move(imm
, scratchReg3
);
1786 return branchSub32(cond
, scratchReg3
, dest
);
1789 Jump
branchSub32(ResultCondition cond
, RegisterID src1
, RegisterID src2
, RegisterID dest
)
1793 return branchSub32(cond
, src2
, dest
);
1796 Jump
branchOr32(ResultCondition cond
, RegisterID src
, RegisterID dest
)
1798 ASSERT((cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
1800 if (cond
== Signed
) {
1802 compare32(0, dest
, static_cast<RelationalCondition
>(LessThan
));
1803 return branchTrue();
1807 compare32(0, dest
, static_cast<RelationalCondition
>(cond
));
1809 if (cond
== NotEqual
)
1810 return branchFalse();
1811 return branchTrue();
1814 void branchConvertDoubleToInt32(FPRegisterID src
, RegisterID dest
, JumpList
& failureCases
, FPRegisterID fpTemp
)
1816 m_assembler
.ftrcdrmfpul(src
);
1817 m_assembler
.stsfpulReg(dest
);
1818 convertInt32ToDouble(dest
, fscratch
);
1819 failureCases
.append(branchDouble(DoubleNotEqualOrUnordered
, fscratch
, src
));
1821 if (dest
== SH4Registers::r0
)
1822 m_assembler
.cmpEqImmR0(0, dest
);
1824 m_assembler
.movImm8(0, scratchReg3
);
1825 m_assembler
.cmplRegReg(scratchReg3
, dest
, SH4Condition(Equal
));
1827 failureCases
.append(branchTrue());
1830 void neg32(RegisterID dst
)
1832 m_assembler
.neg(dst
, dst
);
1835 void urshift32(RegisterID shiftamount
, RegisterID dest
)
1837 if (shiftamount
== SH4Registers::r0
)
1838 m_assembler
.andlImm8r(0x1f, shiftamount
);
1840 RegisterID scr
= claimScratch();
1841 m_assembler
.loadConstant(0x1f, scr
);
1842 m_assembler
.andlRegReg(scr
, shiftamount
);
1843 releaseScratch(scr
);
1845 m_assembler
.neg(shiftamount
, shiftamount
);
1846 m_assembler
.shllRegReg(dest
, shiftamount
);
1849 void urshift32(TrustedImm32 imm
, RegisterID dest
)
1851 RegisterID scr
= claimScratch();
1852 m_assembler
.loadConstant(-(imm
.m_value
& 0x1f), scr
);
1853 m_assembler
.shaRegReg(dest
, scr
);
1854 releaseScratch(scr
);
1857 void urshift32(RegisterID src
, TrustedImm32 shiftamount
, RegisterID dest
)
1862 urshift32(shiftamount
, dest
);
1867 return Call(m_assembler
.call(), Call::Linkable
);
1872 return Call(m_assembler
.call(), Call::LinkableNear
);
1875 Call
call(RegisterID target
)
1877 return Call(m_assembler
.call(target
), Call::None
);
1880 void call(Address address
, RegisterID target
)
1882 load32(address
.base
, address
.offset
, target
);
1883 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 2);
1884 m_assembler
.branch(JSR_OPCODE
, target
);
1894 Jump
branchPtrWithPatch(RelationalCondition cond
, RegisterID left
, DataLabelPtr
& dataLabel
, TrustedImmPtr initialRightValue
= TrustedImmPtr(0))
1896 RegisterID dataTempRegister
= claimScratch();
1898 dataLabel
= moveWithPatch(initialRightValue
, dataTempRegister
);
1899 m_assembler
.cmplRegReg(dataTempRegister
, left
, SH4Condition(cond
));
1900 releaseScratch(dataTempRegister
);
1902 if (cond
== NotEqual
)
1903 return branchFalse();
1904 return branchTrue();
1907 Jump
branchPtrWithPatch(RelationalCondition cond
, Address left
, DataLabelPtr
& dataLabel
, TrustedImmPtr initialRightValue
= TrustedImmPtr(0))
1909 RegisterID scr
= claimScratch();
1911 m_assembler
.loadConstant(left
.offset
, scr
);
1912 m_assembler
.addlRegReg(left
.base
, scr
);
1913 m_assembler
.movlMemReg(scr
, scr
);
1914 RegisterID scr1
= claimScratch();
1915 dataLabel
= moveWithPatch(initialRightValue
, scr1
);
1916 m_assembler
.cmplRegReg(scr1
, scr
, SH4Condition(cond
));
1917 releaseScratch(scr
);
1918 releaseScratch(scr1
);
1920 if (cond
== NotEqual
)
1921 return branchFalse();
1922 return branchTrue();
1931 DataLabelPtr
storePtrWithPatch(TrustedImmPtr initialValue
, ImplicitAddress address
)
1933 RegisterID scr
= claimScratch();
1934 DataLabelPtr label
= moveWithPatch(initialValue
, scr
);
1935 store32(scr
, address
);
1936 releaseScratch(scr
);
1940 DataLabelPtr
storePtrWithPatch(ImplicitAddress address
) { return storePtrWithPatch(TrustedImmPtr(0), address
); }
1942 int sizeOfConstantPool()
1944 return m_assembler
.sizeOfConstantPool();
1947 Call
tailRecursiveCall()
1949 RegisterID scr
= claimScratch();
1951 m_assembler
.loadConstantUnReusable(0x0, scr
, true);
1952 Jump m_jump
= Jump(m_assembler
.jmp(scr
));
1953 releaseScratch(scr
);
1955 return Call::fromTailJump(m_jump
);
1958 Call
makeTailRecursiveCall(Jump oldJump
)
1961 return tailRecursiveCall();
1969 static FunctionPtr
readCallTarget(CodeLocationCall call
)
1971 return FunctionPtr(reinterpret_cast<void(*)()>(SH4Assembler::readCallTarget(call
.dataLocation())));
1975 SH4Assembler::Condition
SH4Condition(RelationalCondition cond
)
1977 return static_cast<SH4Assembler::Condition
>(cond
);
1980 SH4Assembler::Condition
SH4Condition(ResultCondition cond
)
1982 return static_cast<SH4Assembler::Condition
>(cond
);
1985 friend class LinkBuffer
;
1986 friend class RepatchBuffer
;
1988 static void linkCall(void*, Call
, FunctionPtr
);
1989 static void repatchCall(CodeLocationCall
, CodeLocationLabel
);
1990 static void repatchCall(CodeLocationCall
, FunctionPtr
);
1995 #endif // ENABLE(ASSEMBLER)
1997 #endif // MacroAssemblerSH4_h