2 * Copyright (C) 2013 Cisco Systems, Inc. All rights reserved.
3 * Copyright (C) 2009-2011 STMicroelectronics. All rights reserved.
4 * Copyright (C) 2008 Apple Inc. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
16 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
19 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
20 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
23 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #ifndef MacroAssemblerSH4_h
29 #define MacroAssemblerSH4_h
31 #if ENABLE(ASSEMBLER) && CPU(SH4)
33 #include "SH4Assembler.h"
34 #include "AbstractMacroAssembler.h"
35 #include <wtf/Assertions.h>
39 class MacroAssemblerSH4
: public AbstractMacroAssembler
<SH4Assembler
> {
41 typedef SH4Assembler::FPRegisterID FPRegisterID
;
43 static const Scale ScalePtr
= TimesFour
;
44 static const FPRegisterID fscratch
= SH4Registers::fr10
;
45 static const RegisterID stackPointerRegister
= SH4Registers::sp
;
46 static const RegisterID linkRegister
= SH4Registers::pr
;
47 static const RegisterID scratchReg3
= SH4Registers::r13
;
49 static const int MaximumCompactPtrAlignedAddressOffset
= 60;
51 static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value
)
53 return (value
>= 0) && (value
<= MaximumCompactPtrAlignedAddressOffset
);
56 enum RelationalCondition
{
57 Equal
= SH4Assembler::EQ
,
58 NotEqual
= SH4Assembler::NE
,
59 Above
= SH4Assembler::HI
,
60 AboveOrEqual
= SH4Assembler::HS
,
61 Below
= SH4Assembler::LI
,
62 BelowOrEqual
= SH4Assembler::LS
,
63 GreaterThan
= SH4Assembler::GT
,
64 GreaterThanOrEqual
= SH4Assembler::GE
,
65 LessThan
= SH4Assembler::LT
,
66 LessThanOrEqual
= SH4Assembler::LE
69 enum ResultCondition
{
70 Overflow
= SH4Assembler::OF
,
71 Signed
= SH4Assembler::SI
,
72 PositiveOrZero
= SH4Assembler::NS
,
73 Zero
= SH4Assembler::EQ
,
74 NonZero
= SH4Assembler::NE
77 enum DoubleCondition
{
78 // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
79 DoubleEqual
= SH4Assembler::EQ
,
80 DoubleNotEqual
= SH4Assembler::NE
,
81 DoubleGreaterThan
= SH4Assembler::GT
,
82 DoubleGreaterThanOrEqual
= SH4Assembler::GE
,
83 DoubleLessThan
= SH4Assembler::LT
,
84 DoubleLessThanOrEqual
= SH4Assembler::LE
,
85 // If either operand is NaN, these conditions always evaluate to true.
86 DoubleEqualOrUnordered
= SH4Assembler::EQU
,
87 DoubleNotEqualOrUnordered
= SH4Assembler::NEU
,
88 DoubleGreaterThanOrUnordered
= SH4Assembler::GTU
,
89 DoubleGreaterThanOrEqualOrUnordered
= SH4Assembler::GEU
,
90 DoubleLessThanOrUnordered
= SH4Assembler::LTU
,
91 DoubleLessThanOrEqualOrUnordered
= SH4Assembler::LEU
,
94 RegisterID
claimScratch()
96 return m_assembler
.claimScratch();
99 void releaseScratch(RegisterID reg
)
101 m_assembler
.releaseScratch(reg
);
104 // Integer arithmetic operations
106 void add32(RegisterID src
, RegisterID dest
)
108 m_assembler
.addlRegReg(src
, dest
);
111 void add32(TrustedImm32 imm
, RegisterID dest
)
116 if (m_assembler
.isImmediate(imm
.m_value
)) {
117 m_assembler
.addlImm8r(imm
.m_value
, dest
);
121 RegisterID scr
= claimScratch();
122 m_assembler
.loadConstant(imm
.m_value
, scr
);
123 m_assembler
.addlRegReg(scr
, dest
);
127 void add32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
130 m_assembler
.movlRegReg(src
, dest
);
134 void add32(TrustedImm32 imm
, Address address
)
139 RegisterID scr
= claimScratch();
140 load32(address
, scr
);
142 store32(scr
, address
);
146 void add32(Address src
, RegisterID dest
)
148 RegisterID scr
= claimScratch();
150 m_assembler
.addlRegReg(scr
, dest
);
154 void add32(AbsoluteAddress src
, RegisterID dest
)
156 RegisterID scr
= claimScratch();
157 load32(src
.m_ptr
, scr
);
158 m_assembler
.addlRegReg(scr
, dest
);
162 void and32(RegisterID src
, RegisterID dest
)
164 m_assembler
.andlRegReg(src
, dest
);
167 void and32(TrustedImm32 imm
, RegisterID dest
)
169 if ((imm
.m_value
<= 255) && (imm
.m_value
>= 0) && (dest
== SH4Registers::r0
)) {
170 m_assembler
.andlImm8r(imm
.m_value
, dest
);
174 RegisterID scr
= claimScratch();
175 m_assembler
.loadConstant(imm
.m_value
, scr
);
176 m_assembler
.andlRegReg(scr
, dest
);
180 void and32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
191 void lshift32(RegisterID shiftamount
, RegisterID dest
)
193 RegisterID shiftTmp
= claimScratch();
194 m_assembler
.loadConstant(0x1f, shiftTmp
);
195 m_assembler
.andlRegReg(shiftamount
, shiftTmp
);
196 m_assembler
.shldRegReg(dest
, shiftTmp
);
197 releaseScratch(shiftTmp
);
200 void lshift32(TrustedImm32 imm
, RegisterID dest
)
202 int immMasked
= imm
.m_value
& 0x1f;
206 if ((immMasked
== 1) || (immMasked
== 2) || (immMasked
== 8) || (immMasked
== 16)) {
207 m_assembler
.shllImm8r(immMasked
, dest
);
211 RegisterID shiftTmp
= claimScratch();
212 m_assembler
.loadConstant(immMasked
, shiftTmp
);
213 m_assembler
.shldRegReg(dest
, shiftTmp
);
214 releaseScratch(shiftTmp
);
217 void lshift32(RegisterID src
, TrustedImm32 shiftamount
, RegisterID dest
)
222 lshift32(shiftamount
, dest
);
225 void mul32(RegisterID src
, RegisterID dest
)
227 m_assembler
.imullRegReg(src
, dest
);
228 m_assembler
.stsmacl(dest
);
231 void mul32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
233 RegisterID scr
= claimScratch();
241 void or32(RegisterID src
, RegisterID dest
)
243 m_assembler
.orlRegReg(src
, dest
);
246 void or32(TrustedImm32 imm
, RegisterID dest
)
248 if ((imm
.m_value
<= 255) && (imm
.m_value
>= 0) && (dest
== SH4Registers::r0
)) {
249 m_assembler
.orlImm8r(imm
.m_value
, dest
);
253 RegisterID scr
= claimScratch();
254 m_assembler
.loadConstant(imm
.m_value
, scr
);
255 m_assembler
.orlRegReg(scr
, dest
);
259 void or32(RegisterID op1
, RegisterID op2
, RegisterID dest
)
263 else if (op1
== dest
)
271 void or32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
282 void xor32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
293 void rshift32(RegisterID shiftamount
, RegisterID dest
)
295 RegisterID shiftTmp
= claimScratch();
296 m_assembler
.loadConstant(0x1f, shiftTmp
);
297 m_assembler
.andlRegReg(shiftamount
, shiftTmp
);
298 m_assembler
.neg(shiftTmp
, shiftTmp
);
299 m_assembler
.shadRegReg(dest
, shiftTmp
);
300 releaseScratch(shiftTmp
);
303 void rshift32(TrustedImm32 imm
, RegisterID dest
)
305 int immMasked
= imm
.m_value
& 0x1f;
309 if (immMasked
== 1) {
310 m_assembler
.sharImm8r(immMasked
, dest
);
314 RegisterID shiftTmp
= claimScratch();
315 m_assembler
.loadConstant(-immMasked
, shiftTmp
);
316 m_assembler
.shadRegReg(dest
, shiftTmp
);
317 releaseScratch(shiftTmp
);
320 void rshift32(RegisterID src
, TrustedImm32 imm
, RegisterID dest
)
327 void sub32(RegisterID src
, RegisterID dest
)
329 m_assembler
.sublRegReg(src
, dest
);
332 void sub32(TrustedImm32 imm
, AbsoluteAddress address
)
337 RegisterID result
= claimScratch();
338 RegisterID scratchReg
= claimScratch();
340 m_assembler
.loadConstant(reinterpret_cast<uint32_t>(address
.m_ptr
), scratchReg
);
341 m_assembler
.movlMemReg(scratchReg
, result
);
343 if (m_assembler
.isImmediate(-imm
.m_value
))
344 m_assembler
.addlImm8r(-imm
.m_value
, result
);
346 m_assembler
.loadConstant(imm
.m_value
, scratchReg3
);
347 m_assembler
.sublRegReg(scratchReg3
, result
);
350 store32(result
, scratchReg
);
351 releaseScratch(result
);
352 releaseScratch(scratchReg
);
355 void add32(TrustedImm32 imm
, AbsoluteAddress address
)
360 RegisterID result
= claimScratch();
361 RegisterID scratchReg
= claimScratch();
363 m_assembler
.loadConstant(reinterpret_cast<uint32_t>(address
.m_ptr
), scratchReg
);
364 m_assembler
.movlMemReg(scratchReg
, result
);
366 if (m_assembler
.isImmediate(imm
.m_value
))
367 m_assembler
.addlImm8r(imm
.m_value
, result
);
369 m_assembler
.loadConstant(imm
.m_value
, scratchReg3
);
370 m_assembler
.addlRegReg(scratchReg3
, result
);
373 store32(result
, scratchReg
);
374 releaseScratch(result
);
375 releaseScratch(scratchReg
);
378 void add64(TrustedImm32 imm
, AbsoluteAddress address
)
380 RegisterID scr1
= claimScratch();
381 RegisterID scr2
= claimScratch();
383 // Add 32-bit LSB first.
384 m_assembler
.loadConstant(reinterpret_cast<uint32_t>(address
.m_ptr
), scr1
);
385 m_assembler
.movlMemReg(scr1
, scr1
); // scr1 = 32-bit LSB of int64 @ address
386 m_assembler
.loadConstant(imm
.m_value
, scr2
);
388 m_assembler
.addclRegReg(scr1
, scr2
);
389 m_assembler
.loadConstant(reinterpret_cast<uint32_t>(address
.m_ptr
), scr1
);
390 m_assembler
.movlRegMem(scr2
, scr1
); // Update address with 32-bit LSB result.
392 // Then add 32-bit MSB.
393 m_assembler
.addlImm8r(4, scr1
);
394 m_assembler
.movlMemReg(scr1
, scr1
); // scr1 = 32-bit MSB of int64 @ address
395 m_assembler
.movt(scr2
);
397 m_assembler
.addlImm8r(-1, scr2
); // Sign extend imm value if needed.
398 m_assembler
.addvlRegReg(scr2
, scr1
);
399 m_assembler
.loadConstant(reinterpret_cast<uint32_t>(address
.m_ptr
) + 4, scr2
);
400 m_assembler
.movlRegMem(scr1
, scr2
); // Update (address + 4) with 32-bit MSB result.
402 releaseScratch(scr2
);
403 releaseScratch(scr1
);
406 void sub32(TrustedImm32 imm
, RegisterID dest
)
411 if (m_assembler
.isImmediate(-imm
.m_value
)) {
412 m_assembler
.addlImm8r(-imm
.m_value
, dest
);
416 RegisterID scr
= claimScratch();
417 m_assembler
.loadConstant(imm
.m_value
, scr
);
418 m_assembler
.sublRegReg(scr
, dest
);
422 void sub32(Address src
, RegisterID dest
)
424 RegisterID scr
= claimScratch();
426 m_assembler
.sublRegReg(scr
, dest
);
430 void xor32(RegisterID src
, RegisterID dest
)
432 m_assembler
.xorlRegReg(src
, dest
);
435 void xor32(TrustedImm32 imm
, RegisterID srcDest
)
437 if (imm
.m_value
== -1) {
438 m_assembler
.notlReg(srcDest
, srcDest
);
442 if ((srcDest
!= SH4Registers::r0
) || (imm
.m_value
> 255) || (imm
.m_value
< 0)) {
443 RegisterID scr
= claimScratch();
444 m_assembler
.loadConstant(imm
.m_value
, scr
);
445 m_assembler
.xorlRegReg(scr
, srcDest
);
450 m_assembler
.xorlImm8r(imm
.m_value
, srcDest
);
453 void compare32(int imm
, RegisterID dst
, RelationalCondition cond
)
455 if (((cond
== Equal
) || (cond
== NotEqual
)) && (dst
== SH4Registers::r0
) && m_assembler
.isImmediate(imm
)) {
456 m_assembler
.cmpEqImmR0(imm
, dst
);
460 RegisterID scr
= claimScratch();
461 m_assembler
.loadConstant(imm
, scr
);
462 m_assembler
.cmplRegReg(scr
, dst
, SH4Condition(cond
));
466 void compare32(int offset
, RegisterID base
, RegisterID left
, RelationalCondition cond
)
468 RegisterID scr
= claimScratch();
470 m_assembler
.movlMemReg(base
, scr
);
471 m_assembler
.cmplRegReg(scr
, left
, SH4Condition(cond
));
476 if ((offset
< 0) || (offset
>= 64)) {
477 m_assembler
.loadConstant(offset
, scr
);
478 m_assembler
.addlRegReg(base
, scr
);
479 m_assembler
.movlMemReg(scr
, scr
);
480 m_assembler
.cmplRegReg(scr
, left
, SH4Condition(cond
));
485 m_assembler
.movlMemReg(offset
>> 2, base
, scr
);
486 m_assembler
.cmplRegReg(scr
, left
, SH4Condition(cond
));
490 void testImm(int imm
, int offset
, RegisterID base
)
492 RegisterID scr
= claimScratch();
493 RegisterID scr1
= claimScratch();
495 if ((offset
< 0) || (offset
>= 64)) {
496 m_assembler
.loadConstant(offset
, scr
);
497 m_assembler
.addlRegReg(base
, scr
);
498 m_assembler
.movlMemReg(scr
, scr
);
500 m_assembler
.movlMemReg(offset
>> 2, base
, scr
);
502 m_assembler
.movlMemReg(base
, scr
);
503 if (m_assembler
.isImmediate(imm
))
504 m_assembler
.movImm8(imm
, scr1
);
506 m_assembler
.loadConstant(imm
, scr1
);
508 m_assembler
.testlRegReg(scr
, scr1
);
510 releaseScratch(scr1
);
513 void testlImm(int imm
, RegisterID dst
)
515 if ((dst
== SH4Registers::r0
) && (imm
<= 255) && (imm
>= 0)) {
516 m_assembler
.testlImm8r(imm
, dst
);
520 RegisterID scr
= claimScratch();
521 m_assembler
.loadConstant(imm
, scr
);
522 m_assembler
.testlRegReg(scr
, dst
);
526 void compare32(RegisterID right
, int offset
, RegisterID base
, RelationalCondition cond
)
529 RegisterID scr
= claimScratch();
530 m_assembler
.movlMemReg(base
, scr
);
531 m_assembler
.cmplRegReg(right
, scr
, SH4Condition(cond
));
536 if ((offset
< 0) || (offset
>= 64)) {
537 RegisterID scr
= claimScratch();
538 m_assembler
.loadConstant(offset
, scr
);
539 m_assembler
.addlRegReg(base
, scr
);
540 m_assembler
.movlMemReg(scr
, scr
);
541 m_assembler
.cmplRegReg(right
, scr
, SH4Condition(cond
));
546 RegisterID scr
= claimScratch();
547 m_assembler
.movlMemReg(offset
>> 2, base
, scr
);
548 m_assembler
.cmplRegReg(right
, scr
, SH4Condition(cond
));
552 void compare32(int imm
, int offset
, RegisterID base
, RelationalCondition cond
)
555 RegisterID scr
= claimScratch();
556 RegisterID scr1
= claimScratch();
557 m_assembler
.movlMemReg(base
, scr
);
558 m_assembler
.loadConstant(imm
, scr1
);
559 m_assembler
.cmplRegReg(scr1
, scr
, SH4Condition(cond
));
560 releaseScratch(scr1
);
565 if ((offset
< 0) || (offset
>= 64)) {
566 RegisterID scr
= claimScratch();
567 RegisterID scr1
= claimScratch();
568 m_assembler
.loadConstant(offset
, scr
);
569 m_assembler
.addlRegReg(base
, scr
);
570 m_assembler
.movlMemReg(scr
, scr
);
571 m_assembler
.loadConstant(imm
, scr1
);
572 m_assembler
.cmplRegReg(scr1
, scr
, SH4Condition(cond
));
573 releaseScratch(scr1
);
578 RegisterID scr
= claimScratch();
579 RegisterID scr1
= claimScratch();
580 m_assembler
.movlMemReg(offset
>> 2, base
, scr
);
581 m_assembler
.loadConstant(imm
, scr1
);
582 m_assembler
.cmplRegReg(scr1
, scr
, SH4Condition(cond
));
583 releaseScratch(scr1
);
587 // Memory access operation
589 void load32(ImplicitAddress address
, RegisterID dest
)
591 load32(address
.base
, address
.offset
, dest
);
594 void load8(ImplicitAddress address
, RegisterID dest
)
596 load8(address
.base
, address
.offset
, dest
);
599 void load8(BaseIndex address
, RegisterID dest
)
601 RegisterID scr
= claimScratch();
602 move(address
.index
, scr
);
603 lshift32(TrustedImm32(address
.scale
), scr
);
604 add32(address
.base
, scr
);
605 load8(scr
, address
.offset
, dest
);
609 void load8PostInc(RegisterID base
, RegisterID dest
)
611 m_assembler
.movbMemRegIn(base
, dest
);
612 m_assembler
.extub(dest
, dest
);
615 void load8Signed(BaseIndex address
, RegisterID dest
)
617 RegisterID scr
= claimScratch();
618 move(address
.index
, scr
);
619 lshift32(TrustedImm32(address
.scale
), scr
);
620 add32(address
.base
, scr
);
621 load8Signed(scr
, address
.offset
, dest
);
625 void load32(BaseIndex address
, RegisterID dest
)
627 RegisterID scr
= claimScratch();
628 move(address
.index
, scr
);
629 lshift32(TrustedImm32(address
.scale
), scr
);
630 add32(address
.base
, scr
);
631 load32(scr
, address
.offset
, dest
);
635 void load32(const void* address
, RegisterID dest
)
637 m_assembler
.loadConstant(reinterpret_cast<uint32_t>(const_cast<void*>(address
)), dest
);
638 m_assembler
.movlMemReg(dest
, dest
);
641 void load32(RegisterID base
, int offset
, RegisterID dest
)
644 m_assembler
.movlMemReg(base
, dest
);
648 if ((offset
>= 0) && (offset
< 64)) {
649 m_assembler
.movlMemReg(offset
>> 2, base
, dest
);
653 RegisterID scr
= (dest
== base
) ? claimScratch() : dest
;
655 m_assembler
.loadConstant(offset
, scr
);
656 if (base
== SH4Registers::r0
)
657 m_assembler
.movlR0mr(scr
, dest
);
659 m_assembler
.addlRegReg(base
, scr
);
660 m_assembler
.movlMemReg(scr
, dest
);
667 void load8Signed(RegisterID base
, int offset
, RegisterID dest
)
670 m_assembler
.movbMemReg(base
, dest
);
674 if ((offset
> 0) && (offset
<= 15) && (dest
== SH4Registers::r0
)) {
675 m_assembler
.movbMemReg(offset
, base
, dest
);
679 RegisterID scr
= (dest
== base
) ? claimScratch() : dest
;
681 m_assembler
.loadConstant(offset
, scr
);
682 if (base
== SH4Registers::r0
)
683 m_assembler
.movbR0mr(scr
, dest
);
685 m_assembler
.addlRegReg(base
, scr
);
686 m_assembler
.movbMemReg(scr
, dest
);
693 void load8(RegisterID base
, int offset
, RegisterID dest
)
695 load8Signed(base
, offset
, dest
);
696 m_assembler
.extub(dest
, dest
);
699 void load32(RegisterID src
, RegisterID dst
)
701 m_assembler
.movlMemReg(src
, dst
);
704 void load16(ImplicitAddress address
, RegisterID dest
)
706 if (!address
.offset
) {
707 m_assembler
.movwMemReg(address
.base
, dest
);
708 m_assembler
.extuw(dest
, dest
);
712 if ((address
.offset
> 0) && (address
.offset
<= 30) && (dest
== SH4Registers::r0
)) {
713 m_assembler
.movwMemReg(address
.offset
>> 1, address
.base
, dest
);
714 m_assembler
.extuw(dest
, dest
);
718 RegisterID scr
= (dest
== address
.base
) ? claimScratch() : dest
;
720 m_assembler
.loadConstant(address
.offset
, scr
);
721 if (address
.base
== SH4Registers::r0
)
722 m_assembler
.movwR0mr(scr
, dest
);
724 m_assembler
.addlRegReg(address
.base
, scr
);
725 m_assembler
.movwMemReg(scr
, dest
);
727 m_assembler
.extuw(dest
, dest
);
729 if (dest
== address
.base
)
733 void load16Unaligned(BaseIndex address
, RegisterID dest
)
735 RegisterID scr
= claimScratch();
736 RegisterID scr1
= claimScratch();
738 move(address
.index
, scr
);
739 lshift32(TrustedImm32(address
.scale
), scr
);
742 add32(TrustedImm32(address
.offset
), scr
);
744 add32(address
.base
, scr
);
745 load8PostInc(scr
, scr1
);
747 m_assembler
.shllImm8r(8, dest
);
751 releaseScratch(scr1
);
754 void load16(RegisterID src
, RegisterID dest
)
756 m_assembler
.movwMemReg(src
, dest
);
757 m_assembler
.extuw(dest
, dest
);
760 void load16Signed(RegisterID src
, RegisterID dest
)
762 m_assembler
.movwMemReg(src
, dest
);
765 void load16(BaseIndex address
, RegisterID dest
)
767 load16Signed(address
, dest
);
768 m_assembler
.extuw(dest
, dest
);
771 void load16PostInc(RegisterID base
, RegisterID dest
)
773 m_assembler
.movwMemRegIn(base
, dest
);
774 m_assembler
.extuw(dest
, dest
);
777 void load16Signed(BaseIndex address
, RegisterID dest
)
779 RegisterID scr
= claimScratch();
781 move(address
.index
, scr
);
782 lshift32(TrustedImm32(address
.scale
), scr
);
785 add32(TrustedImm32(address
.offset
), scr
);
787 if (address
.base
== SH4Registers::r0
)
788 m_assembler
.movwR0mr(scr
, dest
);
790 add32(address
.base
, scr
);
791 load16Signed(scr
, dest
);
797 void store8(RegisterID src
, BaseIndex address
)
799 RegisterID scr
= claimScratch();
801 move(address
.index
, scr
);
802 lshift32(TrustedImm32(address
.scale
), scr
);
803 add32(TrustedImm32(address
.offset
), scr
);
805 if (address
.base
== SH4Registers::r0
)
806 m_assembler
.movbRegMemr0(src
, scr
);
808 add32(address
.base
, scr
);
809 m_assembler
.movbRegMem(src
, scr
);
815 void store16(RegisterID src
, BaseIndex address
)
817 RegisterID scr
= claimScratch();
819 move(address
.index
, scr
);
820 lshift32(TrustedImm32(address
.scale
), scr
);
821 add32(TrustedImm32(address
.offset
), scr
);
823 if (address
.base
== SH4Registers::r0
)
824 m_assembler
.movwRegMemr0(src
, scr
);
826 add32(address
.base
, scr
);
827 m_assembler
.movwRegMem(src
, scr
);
833 void store32(RegisterID src
, ImplicitAddress address
)
835 if (!address
.offset
) {
836 m_assembler
.movlRegMem(src
, address
.base
);
840 if ((address
.offset
>= 0) && (address
.offset
< 64)) {
841 m_assembler
.movlRegMem(src
, address
.offset
>> 2, address
.base
);
845 RegisterID scr
= claimScratch();
846 m_assembler
.loadConstant(address
.offset
, scr
);
847 if (address
.base
== SH4Registers::r0
)
848 m_assembler
.movlRegMemr0(src
, scr
);
850 m_assembler
.addlRegReg(address
.base
, scr
);
851 m_assembler
.movlRegMem(src
, scr
);
856 void store32(RegisterID src
, RegisterID dst
)
858 m_assembler
.movlRegMem(src
, dst
);
861 void store32(TrustedImm32 imm
, ImplicitAddress address
)
863 RegisterID scr
= claimScratch();
864 m_assembler
.loadConstant(imm
.m_value
, scr
);
865 store32(scr
, address
);
869 void store32(RegisterID src
, BaseIndex address
)
871 RegisterID scr
= claimScratch();
873 move(address
.index
, scr
);
874 lshift32(TrustedImm32(address
.scale
), scr
);
875 add32(address
.base
, scr
);
876 store32(src
, Address(scr
, address
.offset
));
881 void store32(TrustedImm32 imm
, void* address
)
883 RegisterID scr
= claimScratch();
884 RegisterID scr1
= claimScratch();
885 m_assembler
.loadConstant(imm
.m_value
, scr
);
886 m_assembler
.loadConstant(reinterpret_cast<uint32_t>(address
), scr1
);
887 m_assembler
.movlRegMem(scr
, scr1
);
889 releaseScratch(scr1
);
892 void store32(RegisterID src
, void* address
)
894 RegisterID scr
= claimScratch();
895 m_assembler
.loadConstant(reinterpret_cast<uint32_t>(address
), scr
);
896 m_assembler
.movlRegMem(src
, scr
);
900 DataLabel32
load32WithAddressOffsetPatch(Address address
, RegisterID dest
)
902 RegisterID scr
= claimScratch();
903 DataLabel32
label(this);
904 m_assembler
.loadConstantUnReusable(address
.offset
, scr
);
905 m_assembler
.addlRegReg(address
.base
, scr
);
906 m_assembler
.movlMemReg(scr
, dest
);
911 DataLabel32
store32WithAddressOffsetPatch(RegisterID src
, Address address
)
913 RegisterID scr
= claimScratch();
914 DataLabel32
label(this);
915 m_assembler
.loadConstantUnReusable(address
.offset
, scr
);
916 m_assembler
.addlRegReg(address
.base
, scr
);
917 m_assembler
.movlRegMem(src
, scr
);
922 DataLabelCompact
load32WithCompactAddressOffsetPatch(Address address
, RegisterID dest
)
924 DataLabelCompact
dataLabel(this);
925 ASSERT(address
.offset
<= MaximumCompactPtrAlignedAddressOffset
);
926 ASSERT(address
.offset
>= 0);
927 m_assembler
.movlMemRegCompact(address
.offset
>> 2, address
.base
, dest
);
931 ConvertibleLoadLabel
convertibleLoadPtr(Address address
, RegisterID dest
)
933 ConvertibleLoadLabel
result(this);
935 RegisterID scr
= claimScratch();
936 m_assembler
.movImm8(address
.offset
, scr
);
937 m_assembler
.addlRegReg(address
.base
, scr
);
938 m_assembler
.movlMemReg(scr
, dest
);
944 // Floating-point operations
// SH4 has a hardware FPU, so the JIT can emit floating-point code.
static bool supportsFloatingPoint() { return true; }
// Double-to-int truncation is supported in hardware on SH4.
static bool supportsFloatingPointTruncate() { return true; }
// Hardware square root is available on SH4.
static bool supportsFloatingPointSqrt() { return true; }
// Hardware floating-point absolute value is available on SH4.
static bool supportsFloatingPointAbs() { return true; }
951 void moveDoubleToInts(FPRegisterID src
, RegisterID dest1
, RegisterID dest2
)
953 m_assembler
.fldsfpul((FPRegisterID
)(src
+ 1));
954 m_assembler
.stsfpulReg(dest1
);
955 m_assembler
.fldsfpul(src
);
956 m_assembler
.stsfpulReg(dest2
);
959 void moveIntsToDouble(RegisterID src1
, RegisterID src2
, FPRegisterID dest
, FPRegisterID scratch
)
961 UNUSED_PARAM(scratch
);
962 m_assembler
.ldsrmfpul(src1
);
963 m_assembler
.fstsfpul((FPRegisterID
)(dest
+ 1));
964 m_assembler
.ldsrmfpul(src2
);
965 m_assembler
.fstsfpul(dest
);
968 void moveDouble(FPRegisterID src
, FPRegisterID dest
)
971 m_assembler
.fmovsRegReg((FPRegisterID
)(src
+ 1), (FPRegisterID
)(dest
+ 1));
972 m_assembler
.fmovsRegReg(src
, dest
);
976 void loadFloat(BaseIndex address
, FPRegisterID dest
)
978 RegisterID scr
= claimScratch();
980 move(address
.index
, scr
);
981 lshift32(TrustedImm32(address
.scale
), scr
);
982 add32(address
.base
, scr
);
984 add32(TrustedImm32(address
.offset
), scr
);
986 m_assembler
.fmovsReadrm(scr
, dest
);
990 void loadDouble(BaseIndex address
, FPRegisterID dest
)
992 RegisterID scr
= claimScratch();
994 move(address
.index
, scr
);
995 lshift32(TrustedImm32(address
.scale
), scr
);
996 add32(address
.base
, scr
);
998 add32(TrustedImm32(address
.offset
), scr
);
1000 m_assembler
.fmovsReadrminc(scr
, (FPRegisterID
)(dest
+ 1));
1001 m_assembler
.fmovsReadrm(scr
, dest
);
1002 releaseScratch(scr
);
1005 void loadDouble(ImplicitAddress address
, FPRegisterID dest
)
1007 RegisterID scr
= claimScratch();
1009 m_assembler
.loadConstant(address
.offset
, scr
);
1010 if (address
.base
== SH4Registers::r0
) {
1011 m_assembler
.fmovsReadr0r(scr
, (FPRegisterID
)(dest
+ 1));
1012 m_assembler
.addlImm8r(4, scr
);
1013 m_assembler
.fmovsReadr0r(scr
, dest
);
1014 releaseScratch(scr
);
1018 m_assembler
.addlRegReg(address
.base
, scr
);
1019 m_assembler
.fmovsReadrminc(scr
, (FPRegisterID
)(dest
+ 1));
1020 m_assembler
.fmovsReadrm(scr
, dest
);
1021 releaseScratch(scr
);
1024 void loadDouble(const void* address
, FPRegisterID dest
)
1026 RegisterID scr
= claimScratch();
1027 m_assembler
.loadConstant(reinterpret_cast<uint32_t>(address
), scr
);
1028 m_assembler
.fmovsReadrminc(scr
, (FPRegisterID
)(dest
+ 1));
1029 m_assembler
.fmovsReadrm(scr
, dest
);
1030 releaseScratch(scr
);
1033 void storeFloat(FPRegisterID src
, BaseIndex address
)
1035 RegisterID scr
= claimScratch();
1037 move(address
.index
, scr
);
1038 lshift32(TrustedImm32(address
.scale
), scr
);
1039 add32(address
.base
, scr
);
1041 add32(TrustedImm32(address
.offset
), scr
);
1043 m_assembler
.fmovsWriterm(src
, scr
);
1045 releaseScratch(scr
);
1048 void storeDouble(FPRegisterID src
, ImplicitAddress address
)
1050 RegisterID scr
= claimScratch();
1051 m_assembler
.loadConstant(address
.offset
+ 8, scr
);
1052 m_assembler
.addlRegReg(address
.base
, scr
);
1053 m_assembler
.fmovsWriterndec(src
, scr
);
1054 m_assembler
.fmovsWriterndec((FPRegisterID
)(src
+ 1), scr
);
1055 releaseScratch(scr
);
1058 void storeDouble(FPRegisterID src
, BaseIndex address
)
1060 RegisterID scr
= claimScratch();
1062 move(address
.index
, scr
);
1063 lshift32(TrustedImm32(address
.scale
), scr
);
1064 add32(address
.base
, scr
);
1065 add32(TrustedImm32(address
.offset
+ 8), scr
);
1067 m_assembler
.fmovsWriterndec(src
, scr
);
1068 m_assembler
.fmovsWriterndec((FPRegisterID
)(src
+ 1), scr
);
1070 releaseScratch(scr
);
1073 void addDouble(FPRegisterID op1
, FPRegisterID op2
, FPRegisterID dest
)
1076 addDouble(op2
, dest
);
1078 moveDouble(op2
, dest
);
1079 addDouble(op1
, dest
);
1083 void addDouble(FPRegisterID src
, FPRegisterID dest
)
1085 m_assembler
.daddRegReg(src
, dest
);
1088 void addDouble(AbsoluteAddress address
, FPRegisterID dest
)
1090 loadDouble(address
.m_ptr
, fscratch
);
1091 addDouble(fscratch
, dest
);
1094 void addDouble(Address address
, FPRegisterID dest
)
1096 loadDouble(address
, fscratch
);
1097 addDouble(fscratch
, dest
);
1100 void subDouble(FPRegisterID src
, FPRegisterID dest
)
1102 m_assembler
.dsubRegReg(src
, dest
);
1105 void subDouble(Address address
, FPRegisterID dest
)
1107 loadDouble(address
, fscratch
);
1108 subDouble(fscratch
, dest
);
1111 void mulDouble(FPRegisterID src
, FPRegisterID dest
)
1113 m_assembler
.dmulRegReg(src
, dest
);
1116 void mulDouble(Address address
, FPRegisterID dest
)
1118 loadDouble(address
, fscratch
);
1119 mulDouble(fscratch
, dest
);
1122 void divDouble(FPRegisterID src
, FPRegisterID dest
)
1124 m_assembler
.ddivRegReg(src
, dest
);
1127 void convertFloatToDouble(FPRegisterID src
, FPRegisterID dst
)
1129 m_assembler
.fldsfpul(src
);
1130 m_assembler
.dcnvsd(dst
);
1133 void convertDoubleToFloat(FPRegisterID src
, FPRegisterID dst
)
1135 m_assembler
.dcnvds(src
);
1136 m_assembler
.fstsfpul(dst
);
1139 void convertInt32ToDouble(RegisterID src
, FPRegisterID dest
)
1141 m_assembler
.ldsrmfpul(src
);
1142 m_assembler
.floatfpulDreg(dest
);
1145 void convertInt32ToDouble(AbsoluteAddress src
, FPRegisterID dest
)
1147 RegisterID scr
= claimScratch();
1148 load32(src
.m_ptr
, scr
);
1149 convertInt32ToDouble(scr
, dest
);
1150 releaseScratch(scr
);
1153 void convertInt32ToDouble(Address src
, FPRegisterID dest
)
1155 RegisterID scr
= claimScratch();
1157 convertInt32ToDouble(scr
, dest
);
1158 releaseScratch(scr
);
1161 void load32WithUnalignedHalfWords(BaseIndex address
, RegisterID dest
)
1163 RegisterID scr
= claimScratch();
1164 RegisterID scr1
= claimScratch();
1168 if (dest
!= SH4Registers::r0
)
1169 move(SH4Registers::r0
, scr1
);
1171 move(address
.index
, scr
);
1172 lshift32(TrustedImm32(address
.scale
), scr
);
1173 add32(address
.base
, scr
);
1176 add32(TrustedImm32(address
.offset
), scr
);
1178 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 58, sizeof(uint32_t));
1179 move(scr
, SH4Registers::r0
);
1180 m_assembler
.testlImm8r(0x3, SH4Registers::r0
);
1181 m_jump
= Jump(m_assembler
.jne(), SH4Assembler::JumpNear
);
1183 if (dest
!= SH4Registers::r0
)
1184 move(scr1
, SH4Registers::r0
);
1187 end
.append(Jump(m_assembler
.bra(), SH4Assembler::JumpNear
));
1190 m_assembler
.testlImm8r(0x1, SH4Registers::r0
);
1192 if (dest
!= SH4Registers::r0
)
1193 move(scr1
, SH4Registers::r0
);
1195 m_jump
= Jump(m_assembler
.jne(), SH4Assembler::JumpNear
);
1196 load16PostInc(scr
, scr1
);
1198 m_assembler
.shllImm8r(16, dest
);
1200 end
.append(Jump(m_assembler
.bra(), SH4Assembler::JumpNear
));
1203 load8PostInc(scr
, scr1
);
1204 load16PostInc(scr
, dest
);
1205 m_assembler
.shllImm8r(8, dest
);
1208 m_assembler
.shllImm8r(8, dest
);
1209 m_assembler
.shllImm8r(16, dest
);
1213 releaseScratch(scr
);
1214 releaseScratch(scr1
);
1217 Jump
branch32WithUnalignedHalfWords(RelationalCondition cond
, BaseIndex left
, TrustedImm32 right
)
1219 RegisterID scr
= scratchReg3
;
1220 load32WithUnalignedHalfWords(left
, scr
);
1221 if (((cond
== Equal
) || (cond
== NotEqual
)) && !right
.m_value
)
1222 m_assembler
.testlRegReg(scr
, scr
);
1224 compare32(right
.m_value
, scr
, cond
);
1226 if (cond
== NotEqual
)
1227 return branchFalse();
1228 return branchTrue();
1231 Jump
branchDoubleNonZero(FPRegisterID reg
, FPRegisterID scratch
)
1233 m_assembler
.movImm8(0, scratchReg3
);
1234 convertInt32ToDouble(scratchReg3
, scratch
);
1235 return branchDouble(DoubleNotEqual
, reg
, scratch
);
1238 Jump
branchDoubleZeroOrNaN(FPRegisterID reg
, FPRegisterID scratch
)
1240 m_assembler
.movImm8(0, scratchReg3
);
1241 convertInt32ToDouble(scratchReg3
, scratch
);
1242 return branchDouble(DoubleEqualOrUnordered
, reg
, scratch
);
// Emits a double-precision compare-and-branch for every DoubleCondition.
//
// SH4 only provides two FP compares: dcmppeq (equal) and dcmppgt (greater
// than), each setting the T bit. Unordered (NaN) handling must therefore be
// synthesized: dcmppeq(x, x) is false exactly when x is NaN, so each
// "ordered" condition first tests both operands for NaN and short-circuits
// with a near jump, while each "...OrUnordered" condition routes the NaN
// case into the taken path via the 'takeBranch' list.
//
// NOTE(review): this chunk of the file is corrupted; the 'JumpList end;'
// declarations, 'end.link(this);' calls and 'return m_jump;' statements
// were restored to match the visible control flow — verify against the
// upstream WebKit file.
Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
{
    if (cond == DoubleEqual) {
        m_assembler.dcmppeq(right, left);
        return branchTrue();
    }

    if (cond == DoubleNotEqual) {
        JumpList end;
        // NaN in either operand must NOT take the (ordered) branch.
        m_assembler.dcmppeq(left, left);
        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
        end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
        m_assembler.dcmppeq(right, right);
        end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
        m_assembler.dcmppeq(right, left);
        Jump m_jump = branchFalse();
        end.link(this);
        return m_jump;
    }

    if (cond == DoubleGreaterThan) {
        m_assembler.dcmppgt(right, left);
        return branchTrue();
    }

    if (cond == DoubleGreaterThanOrEqual) {
        JumpList end;
        // left >= right  <=>  !(left < right), provided neither is NaN.
        m_assembler.dcmppeq(left, left);
        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
        end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
        m_assembler.dcmppeq(right, right);
        end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
        m_assembler.dcmppgt(left, right);
        Jump m_jump = branchFalse();
        end.link(this);
        return m_jump;
    }

    if (cond == DoubleLessThan) {
        m_assembler.dcmppgt(left, right);
        return branchTrue();
    }

    if (cond == DoubleLessThanOrEqual) {
        JumpList end;
        // left <= right  <=>  !(left > right), provided neither is NaN.
        m_assembler.dcmppeq(left, left);
        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
        end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
        m_assembler.dcmppeq(right, right);
        end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
        m_assembler.dcmppgt(right, left);
        Jump m_jump = branchFalse();
        end.link(this);
        return m_jump;
    }

    if (cond == DoubleEqualOrUnordered) {
        JumpList takeBranch;
        // A NaN operand routes straight into the taken path.
        m_assembler.dcmppeq(left, left);
        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
        takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
        m_assembler.dcmppeq(right, right);
        takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
        m_assembler.dcmppeq(left, right);
        Jump m_jump = Jump(m_assembler.je());
        takeBranch.link(this);
        m_assembler.extraInstrForBranch(scratchReg3);
        return m_jump;
    }

    if (cond == DoubleGreaterThanOrUnordered) {
        JumpList takeBranch;
        m_assembler.dcmppeq(left, left);
        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
        takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
        m_assembler.dcmppeq(right, right);
        takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
        m_assembler.dcmppgt(right, left);
        Jump m_jump = Jump(m_assembler.je());
        takeBranch.link(this);
        m_assembler.extraInstrForBranch(scratchReg3);
        return m_jump;
    }

    if (cond == DoubleGreaterThanOrEqualOrUnordered) {
        // !(left < right) — a NaN makes dcmppgt false, so the unordered case
        // is already on the "branch taken" side of branchFalse().
        m_assembler.dcmppgt(left, right);
        return branchFalse();
    }

    if (cond == DoubleLessThanOrUnordered) {
        JumpList takeBranch;
        m_assembler.dcmppeq(left, left);
        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
        takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
        m_assembler.dcmppeq(right, right);
        takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
        m_assembler.dcmppgt(left, right);
        Jump m_jump = Jump(m_assembler.je());
        takeBranch.link(this);
        m_assembler.extraInstrForBranch(scratchReg3);
        return m_jump;
    }

    if (cond == DoubleLessThanOrEqualOrUnordered) {
        // !(left > right) — NaN again falls on the taken side of branchFalse().
        m_assembler.dcmppgt(right, left);
        return branchFalse();
    }

    ASSERT(cond == DoubleNotEqualOrUnordered);
    // Taken when dcmppeq is false, i.e. not-equal or either operand NaN.
    m_assembler.dcmppeq(right, left);
    return branchFalse();
}
// Emits a branch taken when the T bit is set (je), followed by the extra
// instructions needed so the jump can later be linked to a far target.
// NOTE(review): the 'Jump branchTrue()' signature and 'return m_jump;' are
// missing from this corrupted chunk and were restored — verify upstream.
Jump branchTrue()
{
    m_assembler.ensureSpace(m_assembler.maxInstructionSize + 6, sizeof(uint32_t));
    Jump m_jump = Jump(m_assembler.je());
    m_assembler.extraInstrForBranch(scratchReg3);
    return m_jump;
}

// As branchTrue(), but taken when the T bit is clear (jne).
Jump branchFalse()
{
    m_assembler.ensureSpace(m_assembler.maxInstructionSize + 6, sizeof(uint32_t));
    Jump m_jump = Jump(m_assembler.jne());
    m_assembler.extraInstrForBranch(scratchReg3);
    return m_jump;
}
// Compares the 32-bit word at [base + (index << scale) + offset] with an
// immediate and branches on 'cond'. The effective address is built in one
// scratch register, which is also reused as the loaded-value register.
Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
{
    RegisterID scr = claimScratch();
    move(left.index, scr);
    lshift32(TrustedImm32(left.scale), scr);
    add32(left.base, scr);
    load32(scr, left.offset, scr);
    compare32(right.m_value, scr, cond);
    // Safe to release before branching: branchTrue/branchFalse only consume
    // the T bit set by the compare above.
    releaseScratch(scr);

    if (cond == NotEqual)
        return branchFalse();
    return branchTrue();
}
// dest = sqrt(src), computed in place after copying src into dest.
void sqrtDouble(FPRegisterID src, FPRegisterID dest)
{
    moveDouble(src, dest);
    m_assembler.dsqrt(dest);
}

// dest = fabs(src), computed in place after copying src into dest.
void absDouble(FPRegisterID src, FPRegisterID dest)
{
    moveDouble(src, dest);
    m_assembler.dabs(dest);
}
// Loads a byte from 'address' and branches on (byte & mask) per 'cond'.
// NOTE(review): 'return jmp;' is missing from the corrupted source and was
// restored — verify upstream.
Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
{
    RegisterID addressTempRegister = claimScratch();
    load8(address, addressTempRegister);
    Jump jmp = branchTest32(cond, addressTempRegister, mask);
    releaseScratch(addressTempRegister);
    return jmp;
}

// As above, but for an absolute address: the pointer is first materialized
// in the scratch register, then the byte is loaded through it.
Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
{
    RegisterID addressTempRegister = claimScratch();
    move(TrustedImmPtr(address.m_ptr), addressTempRegister);
    load8(Address(addressTempRegister), addressTempRegister);
    Jump jmp = branchTest32(cond, addressTempRegister, mask);
    releaseScratch(addressTempRegister);
    return jmp;
}
// On SH4 pointers are 32 bits wide, so "sign extending" to pointer width is
// just a register move.
// NOTE(review): the body of this method is entirely missing from the
// corrupted source; the conditional move below matches the upstream WebKit
// implementation — confirm before relying on it.
void signExtend32ToPtr(RegisterID src, RegisterID dest)
{
    if (src != dest)
        move(src, dest);
}

// Loads a byte from 'left' and branches on its comparison with an immediate.
// NOTE(review): 'return jmp;' restored (dropped in the corrupted source).
Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
{
    RegisterID addressTempRegister = claimScratch();
    load8(left, addressTempRegister);
    Jump jmp = branch32(cond, addressTempRegister, right);
    releaseScratch(addressTempRegister);
    return jmp;
}

// Loads a byte from 'left', compares it with an immediate, and materializes
// the boolean result of 'cond' into 'dest'.
void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
{
    RegisterID addressTempRegister = claimScratch();
    load8(left, addressTempRegister);
    compare32(cond, addressTempRegister, right, dest);
    releaseScratch(addressTempRegister);
}
// Selects which outcome of a double->int32 truncation takes the branch.
enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };

// Truncates 'src' to an int32 in 'dest' (ftrc via FPUL) and branches
// according to 'branchType'. Truncation "fails" when the result saturates:
// the two compares below test dest against 0x7fffffff and then, after
// incrementing the constant, against 0x80000000 (as the wrapped value).
// Clobbers scratchReg3.
Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
{
    m_assembler.ftrcdrmfpul(src);
    m_assembler.stsfpulReg(dest);
    m_assembler.loadConstant(0x7fffffff, scratchReg3);
    m_assembler.cmplRegReg(dest, scratchReg3, SH4Condition(Equal));
    m_assembler.ensureSpace(m_assembler.maxInstructionSize + 14, sizeof(uint32_t));
    // If dest == INT32_MAX, skip the second compare (T already set).
    m_assembler.branch(BT_OPCODE, 2);
    m_assembler.addlImm8r(1, scratchReg3);
    m_assembler.cmplRegReg(dest, scratchReg3, SH4Condition(Equal));
    return (branchType == BranchIfTruncateFailed) ? branchTrue() : branchFalse();
}
// Stack manipulation operations

// Pops the top of stack into 'dest'.
void pop(RegisterID dest)
{
    m_assembler.popReg(dest);
}

// Pushes 'src' onto the stack.
void push(RegisterID src)
{
    m_assembler.pushReg(src);
}

// Pushes an immediate: materialize it in a scratch register, then push.
// NOTE(review): the 'push(scr);' line is missing from the corrupted source
// and was restored — verify upstream.
void push(TrustedImm32 imm)
{
    RegisterID scr = claimScratch();
    m_assembler.loadConstant(imm.m_value, scr);
    push(scr);
    releaseScratch(scr);
}
// Register move operations

// dest = imm (may expand to a constant-pool load for large values).
void move(TrustedImm32 imm, RegisterID dest)
{
    m_assembler.loadConstant(imm.m_value, dest);
}

// Loads 'initialValue' into 'dest' with a non-reusable (patchable) constant
// load, and returns a label identifying the constant for later repatching.
// NOTE(review): 'return dataLabel;' restored (dropped in the corrupted source).
DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
{
    m_assembler.ensureSpace(m_assembler.maxInstructionSize, sizeof(uint32_t));
    DataLabelPtr dataLabel(this);
    m_assembler.loadConstantUnReusable(reinterpret_cast<uint32_t>(initialValue.m_value), dest);
    return dataLabel;
}

// dest = src; the self-move is elided.
// NOTE(review): the 'if (src != dest)' guard restored (dropped in the
// corrupted source) — verify upstream.
void move(RegisterID src, RegisterID dest)
{
    if (src != dest)
        m_assembler.movlRegReg(src, dest);
}

// dest = pointer immediate.
void move(TrustedImmPtr imm, RegisterID dest)
{
    m_assembler.loadConstant(imm.asIntptr(), dest);
}
// dest = (left <cond> right) as 0/1. For conditions other than NotEqual the
// T bit already holds the answer (movt). NotEqual must invert it, done with
// a mov/branch/mov sequence instead of a logical not.
// NOTE(review): the 'return;' after movt restored (dropped in the corrupted
// source).
void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
{
    m_assembler.cmplRegReg(right, left, SH4Condition(cond));
    if (cond != NotEqual) {
        m_assembler.movt(dest);
        return;
    }

    m_assembler.ensureSpace(m_assembler.maxInstructionSize + 4);
    m_assembler.movImm8(0, dest);
    m_assembler.branch(BT_OPCODE, 0); // T set (equal) -> leave 0
    m_assembler.movImm8(1, dest);     // T clear (not equal) -> 1
}

// dest = (left <cond> imm) as 0/1. When dest does not alias left, dest can
// double as the temporary holding the immediate; otherwise a scratch is used.
// NOTE(review): the 'if (left != dest)' branch and both 'move(right, ...)'
// loads were restored (dropped in the corrupted source) — verify upstream.
void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
{
    if (left != dest) {
        move(right, dest);
        compare32(cond, left, dest, dest);
        return;
    }

    RegisterID scr = claimScratch();
    move(right, scr);
    compare32(cond, left, scr, dest);
    releaseScratch(scr);
}
// dest = ((byte at address) & mask) <cond> 0, as 0/1. Only Zero/NonZero are
// meaningful ResultConditions here. A mask of -1 degenerates to a plain
// compare against zero. NonZero needs the T-bit inversion sequence, as in
// compare32 above.
// NOTE(review): the 'else' before testlImm and the 'return;' after movt were
// restored (dropped in the corrupted source).
void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
{
    ASSERT((cond == Zero) || (cond == NonZero));

    load8(address, dest);
    if (mask.m_value == -1)
        compare32(0, dest, static_cast<RelationalCondition>(cond));
    else
        testlImm(mask.m_value, dest);
    if (cond != NonZero) {
        m_assembler.movt(dest);
        return;
    }

    m_assembler.ensureSpace(m_assembler.maxInstructionSize + 4);
    m_assembler.movImm8(0, dest);
    m_assembler.branch(BT_OPCODE, 0);
    m_assembler.movImm8(1, dest);
}
// Loads a 32-bit value from 'address' into the PR (procedure return) link
// register, going through a scratch GPR since PR is not directly loadable.
void loadPtrLinkReg(ImplicitAddress address)
{
    RegisterID scr = claimScratch();
    load32(address, scr);
    m_assembler.ldspr(scr);
    releaseScratch(scr);
}
// Branch on (left <cond> right). The compare sets the T bit; NotEqual uses
// the inverted branch (jne via branchFalse), everything else the direct one.
Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
{
    m_assembler.cmplRegReg(right, left, SH4Condition(cond));

    // The emitted sequence is a short conditional skip over a far branch:
    //   BT/BF over {load target, braf}, so either polarity can reach any
    //   destination once linked.
    if (cond == NotEqual)
        return branchFalse();
    return branchTrue();
}

// Branch on (left <cond> imm). Comparing (not-)equal against zero is
// strength-reduced to a tst of the register with itself.
// NOTE(review): the 'else' before compare32 restored (dropped in the
// corrupted source).
Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
{
    if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
        m_assembler.testlRegReg(left, left);
    else
        compare32(right.m_value, left, cond);

    if (cond == NotEqual)
        return branchFalse();
    return branchTrue();
}
// Branch on (left <cond> [right.base + right.offset]).
Jump branch32(RelationalCondition cond, RegisterID left, Address right)
{
    compare32(right.offset, right.base, left, cond);
    if (cond == NotEqual)
        return branchFalse();
    return branchTrue();
}

// Branch on ([left.base + left.offset] <cond> right).
Jump branch32(RelationalCondition cond, Address left, RegisterID right)
{
    compare32(right, left.offset, left.base, cond);
    if (cond == NotEqual)
        return branchFalse();
    return branchTrue();
}

// Branch on ([left.base + left.offset] <cond> imm).
Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
{
    compare32(right.m_value, left.offset, left.base, cond);
    if (cond == NotEqual)
        return branchFalse();
    return branchTrue();
}
// Branch on (*left.m_ptr <cond> right), loading the word through a scratch.
Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
{
    RegisterID scr = claimScratch();

    load32(left.m_ptr, scr);
    m_assembler.cmplRegReg(right, scr, SH4Condition(cond));
    releaseScratch(scr);

    if (cond == NotEqual)
        return branchFalse();
    return branchTrue();
}

// Branch on (*left.m_ptr <cond> imm). One scratch serves as both the
// address register and the loaded-value register.
Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
{
    RegisterID addressTempRegister = claimScratch();

    m_assembler.loadConstant(reinterpret_cast<uint32_t>(left.m_ptr), addressTempRegister);
    m_assembler.movlMemReg(addressTempRegister, addressTempRegister);
    compare32(right.m_value, addressTempRegister, cond);
    releaseScratch(addressTempRegister);

    if (cond == NotEqual)
        return branchFalse();
    return branchTrue();
}
// Branch on ((byte at base + (index << scale) + offset) <cond> imm).
// The immediate must fit in a byte (asserted). Scratches are released
// before the final branch32: that overload only emits a compare on already-
// loaded registers and claims nothing itself.
// NOTE(review): the 'if (left.offset)' guard and the 'load8(scr, scr);' that
// actually loads the byte were restored — they are missing from the
// corrupted source but required for the visible compare to be meaningful;
// verify against upstream.
Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
{
    ASSERT(!(right.m_value & 0xFFFFFF00));
    RegisterID scr = claimScratch();

    move(left.index, scr);
    lshift32(TrustedImm32(left.scale), scr);

    if (left.offset)
        add32(TrustedImm32(left.offset), scr);
    add32(left.base, scr);
    load8(scr, scr);
    RegisterID scr1 = claimScratch();
    m_assembler.loadConstant(right.m_value, scr1);
    releaseScratch(scr);
    releaseScratch(scr1);

    return branch32(cond, scr, scr1);
}
// Branch on ((reg & mask) <cond> 0); only Zero/NonZero are valid. tst sets
// T when the AND is zero, so NonZero takes the inverted branch.
Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
{
    ASSERT((cond == Zero) || (cond == NonZero));

    m_assembler.testlRegReg(reg, mask);

    if (cond == NonZero) // NotEqual
        return branchFalse();
    return branchTrue();
}

// As above with an immediate mask; -1 degenerates to tst reg, reg.
// NOTE(review): the 'else' before testlImm restored (dropped in the
// corrupted source).
Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
{
    ASSERT((cond == Zero) || (cond == NonZero));

    if (mask.m_value == -1)
        m_assembler.testlRegReg(reg, reg);
    else
        testlImm(mask.m_value, reg);

    if (cond == NonZero) // NotEqual
        return branchFalse();
    return branchTrue();
}

// As above for a word in memory at base+offset.
// NOTE(review): the 'else' before testImm restored (dropped in the
// corrupted source).
Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
{
    ASSERT((cond == Zero) || (cond == NonZero));

    if (mask.m_value == -1)
        compare32(0, address.offset, address.base, static_cast<RelationalCondition>(cond));
    else
        testImm(mask.m_value, address.offset, address.base);

    if (cond == NonZero) // NotEqual
        return branchFalse();
    return branchTrue();
}

// As above for a word at base + (index << scale) + offset; the effective
// address and loaded value share one scratch register.
// NOTE(review): the 'else' before testlImm restored (dropped in the
// corrupted source).
Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
{
    RegisterID scr = claimScratch();

    move(address.index, scr);
    lshift32(TrustedImm32(address.scale), scr);
    add32(address.base, scr);
    load32(scr, address.offset, scr);

    if (mask.m_value == -1)
        m_assembler.testlRegReg(scr, scr);
    else
        testlImm(mask.m_value, scr);

    releaseScratch(scr);

    if (cond == NonZero) // NotEqual
        return branchFalse();
    return branchTrue();
}
// Unconditional jump to a to-be-linked target.
// NOTE(review): the 'Jump jump()' signature is missing from the corrupted
// chunk and was restored — verify upstream.
Jump jump()
{
    return Jump(m_assembler.jmp());
}

// Unconditional indirect jump through 'target'.
void jump(RegisterID target)
{
    m_assembler.jmpReg(target);
}

// Jumps to the code address stored at [address.base + address.offset].
// Three addressing strategies depending on the offset: out-of-range offsets
// are materialized and added; small positive multiples of 4 use the scaled
// displacement form; a zero offset loads directly through base.
// NOTE(review): the 'else' before the zero-offset load restored (dropped in
// the corrupted source).
void jump(Address address)
{
    RegisterID scr = claimScratch();

    if ((address.offset < 0) || (address.offset >= 64)) {
        m_assembler.loadConstant(address.offset, scr);
        m_assembler.addlRegReg(address.base, scr);
        m_assembler.movlMemReg(scr, scr);
    } else if (address.offset)
        m_assembler.movlMemReg(address.offset >> 2, address.base, scr);
    else
        m_assembler.movlMemReg(address.base, scr);
    m_assembler.jmpReg(scr);

    releaseScratch(scr);
}
1742 // Arithmetic control flow operations
// dest += src, branching on the requested result condition:
//   Overflow       — addv sets T on signed overflow.
//   Signed         — cmp/pz is false for negative, so use the inverted branch.
//   PositiveOrZero — cmp/pz true for >= 0.
//   Zero/NonZero   — compare the sum against 0.
Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
{
    ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));

    if (cond == Overflow) {
        m_assembler.addvlRegReg(src, dest);
        return branchTrue();
    }

    if (cond == Signed) {
        m_assembler.addlRegReg(src, dest);
        // Check if dest is negative
        m_assembler.cmppz(dest);
        return branchFalse();
    }

    if (cond == PositiveOrZero) {
        m_assembler.addlRegReg(src, dest);
        m_assembler.cmppz(dest);
        return branchTrue();
    }

    m_assembler.addlRegReg(src, dest);
    compare32(0, dest, Equal);

    if (cond == NonZero) // NotEqual
        return branchFalse();
    return branchTrue();
}

// dest += imm with a branch, by staging the immediate in scratchReg3 and
// delegating to the register/register form above.
Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
{
    ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));

    move(imm, scratchReg3);
    return branchAdd32(cond, scratchReg3, dest);
}
// dest = src + imm with a branch on the result condition. The Overflow path
// must use addv directly; the remaining conditions add first and then test
// the value now in dest.
// NOTE(review): the corrupted source is missing the statements that copy
// 'src' into 'dest' and perform the non-overflow addition; 'move(src, dest);'
// and 'add32(imm, dest);' were restored to match the upstream file — verify.
Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
{
    ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));

    move(src, dest);

    if (cond == Overflow) {
        move(imm, scratchReg3);
        m_assembler.addvlRegReg(scratchReg3, dest);
        return branchTrue();
    }

    add32(imm, dest);

    if (cond == Signed) {
        m_assembler.cmppz(dest);
        return branchFalse();
    }

    if (cond == PositiveOrZero) {
        m_assembler.cmppz(dest);
        return branchTrue();
    }

    compare32(0, dest, Equal);

    if (cond == NonZero) // NotEqual
        return branchFalse();
    return branchTrue();
}
// *dest.m_ptr += imm, branching on the result condition. The value is loaded
// through 'destptr' into 'destval', updated, tested, and stored back before
// the branch is emitted. 'result' records which branch polarity (branchTrue
// vs branchFalse) reflects the condition, since the store must happen after
// the compare but before the branch.
// NOTE(review): the 'bool result;' declaration, the 'result = ...;'
// assignments in the first branches and the else-chain structure were
// restored from the visible fragments — verify against upstream.
Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
{
    ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
    bool result;

    move(imm, scratchReg3);
    RegisterID destptr = claimScratch();
    RegisterID destval = claimScratch();
    m_assembler.loadConstant(reinterpret_cast<uint32_t>(dest.m_ptr), destptr);
    m_assembler.movlMemReg(destptr, destval);
    if (cond == Overflow) {
        m_assembler.addvlRegReg(scratchReg3, destval);
        result = true;
    } else {
        m_assembler.addlRegReg(scratchReg3, destval);
        if (cond == Signed) {
            m_assembler.cmppz(destval);
            result = false;
        } else if (cond == PositiveOrZero) {
            m_assembler.cmppz(destval);
            result = true;
        } else {
            m_assembler.movImm8(0, scratchReg3);
            m_assembler.cmplRegReg(scratchReg3, destval, SH4Condition(cond));
            result = (cond == Zero);
        }
    }
    m_assembler.movlRegMem(destval, destptr);
    releaseScratch(destval);
    releaseScratch(destptr);
    return result ? branchTrue() : branchFalse();
}
// dest *= src, branching on the result condition.
// Overflow detection: dmuls.l produces the full 64-bit signed product in
// MACH:MACL. The product overflows 32 bits iff MACH differs from the sign
// extension of MACL, i.e. from (dest >= 0 ? 0 : -1) — computed below as
// movt/addl(-1). branchFalse() then branches when they differ.
Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
{
    ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));

    if (cond == Overflow) {
        RegisterID scrsign = claimScratch();
        RegisterID msbres = claimScratch();
        m_assembler.dmulslRegReg(src, dest);
        m_assembler.stsmacl(dest);
        m_assembler.cmppz(dest);
        m_assembler.movt(scrsign);          // 1 if dest >= 0, else 0
        m_assembler.addlImm8r(-1, scrsign); // 0 if dest >= 0, else -1
        m_assembler.stsmach(msbres);
        m_assembler.cmplRegReg(msbres, scrsign, SH4Condition(Equal));
        releaseScratch(msbres);
        releaseScratch(scrsign);
        return branchFalse();
    }

    m_assembler.imullRegReg(src, dest);
    m_assembler.stsmacl(dest);
    if (cond == Signed) {
        // Check if dest is negative
        m_assembler.cmppz(dest);
        return branchFalse();
    }

    compare32(0, dest, static_cast<RelationalCondition>(cond));

    if (cond == NonZero) // NotEqual
        return branchFalse();
    return branchTrue();
}

// dest = src * imm with a branch, staging the immediate in scratchReg3.
// NOTE(review): the 'if (src != dest) move(src, dest);' between the two
// visible statements is missing from the corrupted source and was restored
// to match upstream — verify.
Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
{
    ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));

    move(imm, scratchReg3);
    if (src != dest)
        move(src, dest);
    return branchMul32(cond, scratchReg3, dest);
}
// dest -= src, branching on the result condition. Overflow uses subv; the
// other conditions subtract and then compare dest against zero.
// NOTE(review): the 'm_assembler.sublRegReg(src, dest);' before the final
// compare is missing from the corrupted source and was restored — verify.
Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
{
    ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));

    if (cond == Overflow) {
        m_assembler.subvlRegReg(src, dest);
        return branchTrue();
    }

    if (cond == Signed) {
        // Check if dest is negative
        m_assembler.sublRegReg(src, dest);
        compare32(0, dest, LessThan);
        return branchTrue();
    }

    m_assembler.sublRegReg(src, dest);
    compare32(0, dest, static_cast<RelationalCondition>(cond));

    if (cond == NonZero) // NotEqual
        return branchFalse();
    return branchTrue();
}

// dest -= imm with a branch, staging the immediate in scratchReg3.
Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
{
    ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));

    move(imm, scratchReg3);
    return branchSub32(cond, scratchReg3, dest);
}

// dest = src - imm with a branch.
// NOTE(review): 'if (src != dest) move(src, dest);' restored (dropped in
// the corrupted source) — verify upstream.
Jump branchSub32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
{
    move(imm, scratchReg3);
    if (src != dest)
        move(src, dest);
    return branchSub32(cond, scratchReg3, dest);
}

// dest = src1 - src2 with a branch.
// NOTE(review): 'move(src1, dest);' restored (dropped in the corrupted
// source) — verify upstream.
Jump branchSub32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
{
    move(src1, dest);
    return branchSub32(cond, src2, dest);
}
// dest |= src, branching on the result condition (Signed/Zero/NonZero).
// NOTE(review): the 'or32(src, dest);' statements in both paths are missing
// from the corrupted source and were restored — without them the compares
// below would test the un-ORed value; verify against upstream.
Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
{
    ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));

    if (cond == Signed) {
        or32(src, dest);
        compare32(0, dest, static_cast<RelationalCondition>(LessThan));
        return branchTrue();
    }

    or32(src, dest);
    compare32(0, dest, static_cast<RelationalCondition>(cond));

    if (cond == NonZero) // NotEqual
        return branchFalse();
    return branchTrue();
}
// Converts 'src' to an int32 in 'dest', appending to 'failureCases' when the
// conversion is lossy: the truncated value is converted back to double and
// compared with the original (NotEqualOrUnordered catches both inexact
// results and NaN). When negZeroCheck is requested, a zero result is also a
// failure, since -0.0 truncates to 0 indistinguishably.
// Note: 'fpTemp' is unused in this implementation — the class-level
// 'fscratch' FP register is used for the round-trip instead.
// NOTE(review): the 'if (negZeroCheck) {' guard and its 'else' were restored
// (dropped in the corrupted source) — verify upstream.
void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp, bool negZeroCheck = true)
{
    m_assembler.ftrcdrmfpul(src);
    m_assembler.stsfpulReg(dest);
    convertInt32ToDouble(dest, fscratch);
    failureCases.append(branchDouble(DoubleNotEqualOrUnordered, fscratch, src));

    if (negZeroCheck) {
        if (dest == SH4Registers::r0)
            m_assembler.cmpEqImmR0(0, dest);
        else {
            m_assembler.movImm8(0, scratchReg3);
            m_assembler.cmplRegReg(scratchReg3, dest, SH4Condition(Equal));
        }
        failureCases.append(branchTrue());
    }
}
// dst = -dst (two's-complement negation in place).
void neg32(RegisterID dst)
{
    m_assembler.neg(dst, dst);
}
// dest >>>= (shiftamount & 31). SH4's shld shifts left for a positive count
// and right for a negative one, so the masked amount is negated first.
void urshift32(RegisterID shiftamount, RegisterID dest)
{
    RegisterID shiftTmp = claimScratch();
    m_assembler.loadConstant(0x1f, shiftTmp);
    m_assembler.andlRegReg(shiftamount, shiftTmp);
    m_assembler.neg(shiftTmp, shiftTmp);
    m_assembler.shldRegReg(dest, shiftTmp);
    releaseScratch(shiftTmp);
}

// dest >>>= (imm & 31). Shift counts of 1/2/8/16 have dedicated single
// instructions (shlr/shlr2/shlr8/shlr16); anything else goes through shld
// with a negated count.
// NOTE(review): the early-outs 'if (!immMasked) return;' and the 'return;'
// after shlrImm8r were restored (dropped in the corrupted source) — verify.
void urshift32(TrustedImm32 imm, RegisterID dest)
{
    int immMasked = imm.m_value & 0x1f;
    if (!immMasked)
        return;

    if ((immMasked == 1) || (immMasked == 2) || (immMasked == 8) || (immMasked == 16)) {
        m_assembler.shlrImm8r(immMasked, dest);
        return;
    }

    RegisterID shiftTmp = claimScratch();
    m_assembler.loadConstant(-immMasked, shiftTmp);
    m_assembler.shldRegReg(dest, shiftTmp);
    releaseScratch(shiftTmp);
}

// dest = src >>> (shiftamount & 31).
// NOTE(review): 'if (src != dest) move(src, dest);' restored (dropped in
// the corrupted source) — verify upstream.
void urshift32(RegisterID src, TrustedImm32 shiftamount, RegisterID dest)
{
    if (src != dest)
        move(src, dest);

    urshift32(shiftamount, dest);
}
// Emits a linkable (patchable) call.
// NOTE(review): the 'Call call()' and 'Call nearCall()' signatures are
// missing from the corrupted chunk and were restored — verify upstream.
Call call()
{
    return Call(m_assembler.call(), Call::Linkable);
}

// Emits a linkable near call.
Call nearCall()
{
    return Call(m_assembler.call(), Call::LinkableNear);
}

// Indirect call through 'target'; not linkable afterwards.
Call call(RegisterID target)
{
    return Call(m_assembler.call(target), Call::None);
}

// Loads a code pointer from [address.base + address.offset] into 'target'
// and calls through it (JSR).
void call(Address address, RegisterID target)
{
    load32(address.base, address.offset, target);
    m_assembler.ensureSpace(m_assembler.maxInstructionSize + 2);
    m_assembler.branch(JSR_OPCODE, target);
}
// Compares 'left' with a patchable pointer constant (initially
// initialRightValue) and branches on 'cond'. 'dataLabel' identifies the
// constant so the comparand can be repatched later.
Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
{
    RegisterID dataTempRegister = claimScratch();

    dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
    m_assembler.cmplRegReg(dataTempRegister, left, SH4Condition(cond));
    releaseScratch(dataTempRegister);

    if (cond == NotEqual)
        return branchFalse();
    return branchTrue();
}

// As above, but the left operand is loaded from memory at
// [left.base + left.offset] first.
Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
{
    RegisterID scr = claimScratch();

    m_assembler.loadConstant(left.offset, scr);
    m_assembler.addlRegReg(left.base, scr);
    m_assembler.movlMemReg(scr, scr);
    RegisterID scr1 = claimScratch();
    dataLabel = moveWithPatch(initialRightValue, scr1);
    m_assembler.cmplRegReg(scr1, scr, SH4Condition(cond));
    releaseScratch(scr);
    releaseScratch(scr1);

    if (cond == NotEqual)
        return branchFalse();
    return branchTrue();
}
// Stores a patchable pointer constant to 'address' and returns the label of
// the constant for repatching.
// NOTE(review): 'return label;' restored (dropped in the corrupted source).
DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
{
    RegisterID scr = claimScratch();
    DataLabelPtr label = moveWithPatch(initialValue, scr);
    store32(scr, address);
    releaseScratch(scr);
    return label;
}

// Convenience overload: store a patchable null pointer.
DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(TrustedImmPtr(0), address); }

// Size in bytes of the assembler's pending constant pool.
int sizeOfConstantPool()
{
    return m_assembler.sizeOfConstantPool();
}
// Emits a tail call: an (initially unlinked) indirect jump through a
// patchable constant, wrapped as a Call via fromTailJump.
Call tailRecursiveCall()
{
    RegisterID scr = claimScratch();

    m_assembler.loadConstantUnReusable(0x0, scr, true);
    Jump m_jump = Jump(m_assembler.jmp(scr));
    releaseScratch(scr);

    return Call::fromTailJump(m_jump);
}

// Links 'oldJump' to the current location and replaces it with a fresh
// tail call.
// NOTE(review): 'oldJump.link(this);' restored (dropped in the corrupted
// source) — verify upstream.
Call makeTailRecursiveCall(Jump oldJump)
{
    oldJump.link(this);
    return tailRecursiveCall();
}
// Reads back the target of a previously linked call instruction.
static FunctionPtr readCallTarget(CodeLocationCall call)
{
    return FunctionPtr(reinterpret_cast<void(*)()>(SH4Assembler::readCallTarget(call.dataLocation())));
}

// Overwrites the instruction(s) at 'instructionStart' with a jump to
// 'destination'.
static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
{
    SH4Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
}

// Maximum byte length of a replacement jump sequence on SH4.
static ptrdiff_t maxJumpReplacementSize()
{
    return SH4Assembler::maxJumpReplacementSize();
}

// SH4 does not support patching a branch-ptr on an address operand.
static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }

// The patchable constant load is the first instruction of the sequence.
static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
{
    return label.labelAtOffset(0);
}

// Undoes replaceWithJump, restoring the original branch-ptr-with-patch
// sequence with 'initialValue' as the comparand.
static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
{
    SH4Assembler::revertJump(instructionStart.dataLocation(), initialValue);
}

// Unsupported on SH4 (see canJumpReplacePatchableBranchPtrWithPatch).
static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
{
    UNREACHABLE_FOR_PLATFORM();
    return CodeLocationLabel();
}

// Unsupported on SH4 (see canJumpReplacePatchableBranchPtrWithPatch).
static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, Address, void* initialValue)
{
    UNREACHABLE_FOR_PLATFORM();
}
// Maps the portable RelationalCondition enum onto the assembler's condition
// encoding (the enums are laid out to make this a plain cast).
SH4Assembler::Condition SH4Condition(RelationalCondition cond)
{
    return static_cast<SH4Assembler::Condition>(cond);
}

// Same mapping for ResultCondition values.
SH4Assembler::Condition SH4Condition(ResultCondition cond)
{
    return static_cast<SH4Assembler::Condition>(cond);
}
2168 friend class LinkBuffer
;
2169 friend class RepatchBuffer
;
// Links an unresolved call inside freshly generated 'code' to 'function'.
static void linkCall(void* code, Call call, FunctionPtr function)
{
    SH4Assembler::linkCall(code, call.m_label, function.value());
}

// Repoints an already-linked call to a new code location.
static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
{
    SH4Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
}

// Repoints an already-linked call to a new function.
static void repatchCall(CodeLocationCall call, FunctionPtr destination)
{
    SH4Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
}
2189 #endif // ENABLE(ASSEMBLER)
2191 #endif // MacroAssemblerSH4_h