2 * Copyright (C) 2009-2011 STMicroelectronics. All rights reserved.
3 * Copyright (C) 2008 Apple Inc. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #ifndef MacroAssemblerSH4_h
28 #define MacroAssemblerSH4_h
30 #if ENABLE(ASSEMBLER) && CPU(SH4)
32 #include "AbstractMacroAssembler.h"
33 #include "SH4Assembler.h"
34 #include <wtf/Assertions.h>
38 class MacroAssemblerSH4
: public AbstractMacroAssembler
<SH4Assembler
> {
40 typedef SH4Assembler::FPRegisterID FPRegisterID
;
42 static const Scale ScalePtr
= TimesFour
;
43 static const FPRegisterID fscratch
= SH4Registers::fr10
;
44 static const RegisterID stackPointerRegister
= SH4Registers::sp
;
45 static const RegisterID linkRegister
= SH4Registers::pr
;
46 static const RegisterID scratchReg3
= SH4Registers::r13
;
48 static const int MaximumCompactPtrAlignedAddressOffset
= 0x7FFFFFFF;
50 enum RelationalCondition
{
51 Equal
= SH4Assembler::EQ
,
52 NotEqual
= SH4Assembler::NE
,
53 Above
= SH4Assembler::HI
,
54 AboveOrEqual
= SH4Assembler::HS
,
55 Below
= SH4Assembler::LI
,
56 BelowOrEqual
= SH4Assembler::LS
,
57 GreaterThan
= SH4Assembler::GT
,
58 GreaterThanOrEqual
= SH4Assembler::GE
,
59 LessThan
= SH4Assembler::LT
,
60 LessThanOrEqual
= SH4Assembler::LE
63 enum ResultCondition
{
64 Overflow
= SH4Assembler::OF
,
65 Signed
= SH4Assembler::SI
,
66 Zero
= SH4Assembler::EQ
,
67 NonZero
= SH4Assembler::NE
70 enum DoubleCondition
{
71 // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
72 DoubleEqual
= SH4Assembler::EQ
,
73 DoubleNotEqual
= SH4Assembler::NE
,
74 DoubleGreaterThan
= SH4Assembler::GT
,
75 DoubleGreaterThanOrEqual
= SH4Assembler::GE
,
76 DoubleLessThan
= SH4Assembler::LT
,
77 DoubleLessThanOrEqual
= SH4Assembler::LE
,
78 // If either operand is NaN, these conditions always evaluate to true.
79 DoubleEqualOrUnordered
= SH4Assembler::EQU
,
80 DoubleNotEqualOrUnordered
= SH4Assembler::NEU
,
81 DoubleGreaterThanOrUnordered
= SH4Assembler::GTU
,
82 DoubleGreaterThanOrEqualOrUnordered
= SH4Assembler::GEU
,
83 DoubleLessThanOrUnordered
= SH4Assembler::LTU
,
84 DoubleLessThanOrEqualOrUnordered
= SH4Assembler::LEU
,
87 RegisterID
claimScratch()
89 return m_assembler
.claimScratch();
92 void releaseScratch(RegisterID reg
)
94 m_assembler
.releaseScratch(reg
);
97 // Integer arithmetic operations
99 void add32(RegisterID src
, RegisterID dest
)
101 m_assembler
.addlRegReg(src
, dest
);
104 void add32(TrustedImm32 imm
, RegisterID dest
)
106 if (m_assembler
.isImmediate(imm
.m_value
)) {
107 m_assembler
.addlImm8r(imm
.m_value
, dest
);
111 RegisterID scr
= claimScratch();
112 m_assembler
.loadConstant(imm
.m_value
, scr
);
113 m_assembler
.addlRegReg(scr
, dest
);
117 void add32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
120 m_assembler
.movlRegReg(src
, dest
);
124 void add32(TrustedImm32 imm
, Address address
)
126 RegisterID scr
= claimScratch();
127 load32(address
, scr
);
129 store32(scr
, address
);
133 void add32(Address src
, RegisterID dest
)
135 RegisterID scr
= claimScratch();
137 m_assembler
.addlRegReg(scr
, dest
);
141 void and32(RegisterID src
, RegisterID dest
)
143 m_assembler
.andlRegReg(src
, dest
);
146 void and32(TrustedImm32 imm
, RegisterID dest
)
148 if ((imm
.m_value
<= 255) && (imm
.m_value
>= 0) && (dest
== SH4Registers::r0
)) {
149 m_assembler
.andlImm8r(imm
.m_value
, dest
);
153 RegisterID scr
= claimScratch();
154 m_assembler
.loadConstant((imm
.m_value
), scr
);
155 m_assembler
.andlRegReg(scr
, dest
);
159 void lshift32(RegisterID shiftamount
, RegisterID dest
)
161 m_assembler
.shllRegReg(dest
, shiftamount
);
164 void rshift32(int imm
, RegisterID dest
)
166 RegisterID scr
= claimScratch();
167 m_assembler
.loadConstant(-imm
, scr
);
168 m_assembler
.shaRegReg(dest
, scr
);
172 void lshift32(TrustedImm32 imm
, RegisterID dest
)
174 if ((imm
.m_value
== 1) || (imm
.m_value
== 2) || (imm
.m_value
== 8) || (imm
.m_value
== 16)) {
175 m_assembler
.shllImm8r(imm
.m_value
, dest
);
179 RegisterID scr
= claimScratch();
180 m_assembler
.loadConstant(imm
.m_value
, scr
);
181 m_assembler
.shllRegReg(dest
, scr
);
185 void mul32(RegisterID src
, RegisterID dest
)
187 m_assembler
.imullRegReg(src
, dest
);
188 m_assembler
.stsmacl(dest
);
191 void mul32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
193 RegisterID scr
= claimScratch();
201 void not32(RegisterID src
, RegisterID dest
)
203 m_assembler
.notlReg(src
, dest
);
206 void or32(RegisterID src
, RegisterID dest
)
208 m_assembler
.orlRegReg(src
, dest
);
211 void or32(TrustedImm32 imm
, RegisterID dest
)
213 if ((imm
.m_value
<= 255) && (imm
.m_value
>= 0) && (dest
== SH4Registers::r0
)) {
214 m_assembler
.orlImm8r(imm
.m_value
, dest
);
218 RegisterID scr
= claimScratch();
219 m_assembler
.loadConstant(imm
.m_value
, scr
);
220 m_assembler
.orlRegReg(scr
, dest
);
224 void rshift32(RegisterID shiftamount
, RegisterID dest
)
226 compare32(32, shiftamount
, Equal
);
227 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 4);
228 m_assembler
.branch(BT_OPCODE
, 1);
229 m_assembler
.neg(shiftamount
, shiftamount
);
230 m_assembler
.shaRegReg(dest
, shiftamount
);
233 void rshift32(TrustedImm32 imm
, RegisterID dest
)
235 if (imm
.m_value
& 0x1f)
236 rshift32(imm
.m_value
& 0x1f, dest
);
239 void sub32(RegisterID src
, RegisterID dest
)
241 m_assembler
.sublRegReg(src
, dest
);
244 void sub32(TrustedImm32 imm
, AbsoluteAddress address
, RegisterID scratchReg
)
246 RegisterID result
= claimScratch();
248 m_assembler
.loadConstant(reinterpret_cast<uint32_t>(address
.m_ptr
), scratchReg
);
249 m_assembler
.movlMemReg(scratchReg
, result
);
251 if (m_assembler
.isImmediate(-imm
.m_value
))
252 m_assembler
.addlImm8r(-imm
.m_value
, result
);
254 m_assembler
.loadConstant(imm
.m_value
, scratchReg3
);
255 m_assembler
.sublRegReg(scratchReg3
, result
);
258 store32(result
, scratchReg
);
259 releaseScratch(result
);
262 void sub32(TrustedImm32 imm
, AbsoluteAddress address
)
264 RegisterID result
= claimScratch();
265 RegisterID scratchReg
= claimScratch();
267 m_assembler
.loadConstant(reinterpret_cast<uint32_t>(address
.m_ptr
), scratchReg
);
268 m_assembler
.movlMemReg(scratchReg
, result
);
270 if (m_assembler
.isImmediate(-imm
.m_value
))
271 m_assembler
.addlImm8r(-imm
.m_value
, result
);
273 m_assembler
.loadConstant(imm
.m_value
, scratchReg3
);
274 m_assembler
.sublRegReg(scratchReg3
, result
);
277 store32(result
, scratchReg
);
278 releaseScratch(result
);
279 releaseScratch(scratchReg
);
282 void add32(TrustedImm32 imm
, AbsoluteAddress address
, RegisterID scratchReg
)
284 RegisterID result
= claimScratch();
286 m_assembler
.loadConstant(reinterpret_cast<uint32_t>(address
.m_ptr
), scratchReg
);
287 m_assembler
.movlMemReg(scratchReg
, result
);
289 if (m_assembler
.isImmediate(imm
.m_value
))
290 m_assembler
.addlImm8r(imm
.m_value
, result
);
292 m_assembler
.loadConstant(imm
.m_value
, scratchReg3
);
293 m_assembler
.addlRegReg(scratchReg3
, result
);
296 store32(result
, scratchReg
);
297 releaseScratch(result
);
300 void add32(TrustedImm32 imm
, AbsoluteAddress address
)
302 RegisterID result
= claimScratch();
303 RegisterID scratchReg
= claimScratch();
305 m_assembler
.loadConstant(reinterpret_cast<uint32_t>(address
.m_ptr
), scratchReg
);
306 m_assembler
.movlMemReg(scratchReg
, result
);
308 if (m_assembler
.isImmediate(imm
.m_value
))
309 m_assembler
.addlImm8r(imm
.m_value
, result
);
311 m_assembler
.loadConstant(imm
.m_value
, scratchReg3
);
312 m_assembler
.addlRegReg(scratchReg3
, result
);
315 store32(result
, scratchReg
);
316 releaseScratch(result
);
317 releaseScratch(scratchReg
);
320 void sub32(TrustedImm32 imm
, RegisterID dest
)
322 if (m_assembler
.isImmediate(-imm
.m_value
)) {
323 m_assembler
.addlImm8r(-imm
.m_value
, dest
);
327 RegisterID scr
= claimScratch();
328 m_assembler
.loadConstant(imm
.m_value
, scr
);
329 m_assembler
.sublRegReg(scr
, dest
);
333 void sub32(Address src
, RegisterID dest
)
335 RegisterID scr
= claimScratch();
337 m_assembler
.sublRegReg(scr
, dest
);
341 void xor32(RegisterID src
, RegisterID dest
)
343 m_assembler
.xorlRegReg(src
, dest
);
346 void xor32(TrustedImm32 imm
, RegisterID srcDest
)
348 if ((srcDest
!= SH4Registers::r0
) || (imm
.m_value
> 255) || (imm
.m_value
< 0)) {
349 RegisterID scr
= claimScratch();
350 m_assembler
.loadConstant((imm
.m_value
), scr
);
351 m_assembler
.xorlRegReg(scr
, srcDest
);
356 m_assembler
.xorlImm8r(imm
.m_value
, srcDest
);
359 void compare32(int imm
, RegisterID dst
, RelationalCondition cond
)
361 if (((cond
== Equal
) || (cond
== NotEqual
)) && (dst
== SH4Registers::r0
) && m_assembler
.isImmediate(imm
)) {
362 m_assembler
.cmpEqImmR0(imm
, dst
);
366 RegisterID scr
= claimScratch();
367 m_assembler
.loadConstant(imm
, scr
);
368 m_assembler
.cmplRegReg(scr
, dst
, SH4Condition(cond
));
372 void compare32(int offset
, RegisterID base
, RegisterID left
, RelationalCondition cond
)
374 RegisterID scr
= claimScratch();
376 m_assembler
.movlMemReg(base
, scr
);
377 m_assembler
.cmplRegReg(scr
, left
, SH4Condition(cond
));
382 if ((offset
< 0) || (offset
>= 64)) {
383 m_assembler
.loadConstant(offset
, scr
);
384 m_assembler
.addlRegReg(base
, scr
);
385 m_assembler
.movlMemReg(scr
, scr
);
386 m_assembler
.cmplRegReg(scr
, left
, SH4Condition(cond
));
391 m_assembler
.movlMemReg(offset
>> 2, base
, scr
);
392 m_assembler
.cmplRegReg(scr
, left
, SH4Condition(cond
));
396 void testImm(int imm
, int offset
, RegisterID base
)
398 RegisterID scr
= claimScratch();
399 RegisterID scr1
= claimScratch();
401 if ((offset
< 0) || (offset
>= 64)) {
402 m_assembler
.loadConstant(offset
, scr
);
403 m_assembler
.addlRegReg(base
, scr
);
404 m_assembler
.movlMemReg(scr
, scr
);
406 m_assembler
.movlMemReg(offset
>> 2, base
, scr
);
408 m_assembler
.movlMemReg(base
, scr
);
409 if (m_assembler
.isImmediate(imm
))
410 m_assembler
.movImm8(imm
, scr1
);
412 m_assembler
.loadConstant(imm
, scr1
);
414 m_assembler
.testlRegReg(scr
, scr1
);
416 releaseScratch(scr1
);
419 void testlImm(int imm
, RegisterID dst
)
421 if ((dst
== SH4Registers::r0
) && (imm
<= 255) && (imm
>= 0)) {
422 m_assembler
.testlImm8r(imm
, dst
);
426 RegisterID scr
= claimScratch();
427 m_assembler
.loadConstant(imm
, scr
);
428 m_assembler
.testlRegReg(scr
, dst
);
432 void compare32(RegisterID right
, int offset
, RegisterID base
, RelationalCondition cond
)
435 RegisterID scr
= claimScratch();
436 m_assembler
.movlMemReg(base
, scr
);
437 m_assembler
.cmplRegReg(right
, scr
, SH4Condition(cond
));
442 if ((offset
< 0) || (offset
>= 64)) {
443 RegisterID scr
= claimScratch();
444 m_assembler
.loadConstant(offset
, scr
);
445 m_assembler
.addlRegReg(base
, scr
);
446 m_assembler
.movlMemReg(scr
, scr
);
447 m_assembler
.cmplRegReg(right
, scr
, SH4Condition(cond
));
452 RegisterID scr
= claimScratch();
453 m_assembler
.movlMemReg(offset
>> 2, base
, scr
);
454 m_assembler
.cmplRegReg(right
, scr
, SH4Condition(cond
));
458 void compare32(int imm
, int offset
, RegisterID base
, RelationalCondition cond
)
461 RegisterID scr
= claimScratch();
462 RegisterID scr1
= claimScratch();
463 m_assembler
.movlMemReg(base
, scr
);
464 m_assembler
.loadConstant(imm
, scr1
);
465 m_assembler
.cmplRegReg(scr1
, scr
, SH4Condition(cond
));
466 releaseScratch(scr1
);
471 if ((offset
< 0) || (offset
>= 64)) {
472 RegisterID scr
= claimScratch();
473 RegisterID scr1
= claimScratch();
474 m_assembler
.loadConstant(offset
, scr
);
475 m_assembler
.addlRegReg(base
, scr
);
476 m_assembler
.movlMemReg(scr
, scr
);
477 m_assembler
.loadConstant(imm
, scr1
);
478 m_assembler
.cmplRegReg(scr1
, scr
, SH4Condition(cond
));
479 releaseScratch(scr1
);
484 RegisterID scr
= claimScratch();
485 RegisterID scr1
= claimScratch();
486 m_assembler
.movlMemReg(offset
>> 2, base
, scr
);
487 m_assembler
.loadConstant(imm
, scr1
);
488 m_assembler
.cmplRegReg(scr1
, scr
, SH4Condition(cond
));
489 releaseScratch(scr1
);
493 // Memory access operation
495 void load32(ImplicitAddress address
, RegisterID dest
)
497 load32(address
.base
, address
.offset
, dest
);
500 void load8(ImplicitAddress address
, RegisterID dest
)
502 load8(address
.base
, address
.offset
, dest
);
505 void load32(BaseIndex address
, RegisterID dest
)
507 RegisterID scr
= claimScratch();
508 move(address
.index
, scr
);
509 lshift32(TrustedImm32(address
.scale
), scr
);
510 add32(address
.base
, scr
);
511 load32(scr
, address
.offset
, dest
);
515 void load32(void* address
, RegisterID dest
)
517 m_assembler
.loadConstant(reinterpret_cast<uint32_t>(address
), dest
);
518 m_assembler
.movlMemReg(dest
, dest
);
521 void load32(RegisterID base
, int offset
, RegisterID dest
)
524 m_assembler
.movlMemReg(base
, dest
);
528 if ((offset
>= 0) && (offset
< 64)) {
529 m_assembler
.movlMemReg(offset
>> 2, base
, dest
);
533 if ((dest
== SH4Registers::r0
) && (dest
!= base
)) {
534 m_assembler
.loadConstant((offset
), dest
);
535 m_assembler
.movlR0mr(base
, dest
);
541 scr
= claimScratch();
544 m_assembler
.loadConstant((offset
), scr
);
545 m_assembler
.addlRegReg(base
, scr
);
546 m_assembler
.movlMemReg(scr
, dest
);
552 void load8(RegisterID base
, int offset
, RegisterID dest
)
555 m_assembler
.movbMemReg(base
, dest
);
559 if ((offset
> 0) && (offset
< 64) && (dest
== SH4Registers::r0
)) {
560 m_assembler
.movbMemReg(offset
, base
, dest
);
565 m_assembler
.loadConstant((offset
), dest
);
566 m_assembler
.addlRegReg(base
, dest
);
567 m_assembler
.movbMemReg(dest
, dest
);
571 RegisterID scr
= claimScratch();
572 m_assembler
.loadConstant((offset
), scr
);
573 m_assembler
.addlRegReg(base
, scr
);
574 m_assembler
.movbMemReg(scr
, dest
);
578 void load32(RegisterID r0
, RegisterID src
, RegisterID dst
)
580 ASSERT(r0
== SH4Registers::r0
);
581 m_assembler
.movlR0mr(src
, dst
);
584 void load32(RegisterID src
, RegisterID dst
)
586 m_assembler
.movlMemReg(src
, dst
);
589 void load16(ImplicitAddress address
, RegisterID dest
)
591 if (!address
.offset
) {
592 m_assembler
.movwMemReg(address
.base
, dest
);
596 if ((address
.offset
> 0) && (address
.offset
< 64) && (dest
== SH4Registers::r0
)) {
597 m_assembler
.movwMemReg(address
.offset
, address
.base
, dest
);
601 if (address
.base
!= dest
) {
602 m_assembler
.loadConstant((address
.offset
), dest
);
603 m_assembler
.addlRegReg(address
.base
, dest
);
604 m_assembler
.movwMemReg(dest
, dest
);
608 RegisterID scr
= claimScratch();
609 m_assembler
.loadConstant((address
.offset
), scr
);
610 m_assembler
.addlRegReg(address
.base
, scr
);
611 m_assembler
.movwMemReg(scr
, dest
);
615 void load16(RegisterID src
, RegisterID dest
)
617 m_assembler
.movwMemReg(src
, dest
);
620 void load16(RegisterID r0
, RegisterID src
, RegisterID dest
)
622 ASSERT(r0
== SH4Registers::r0
);
623 m_assembler
.movwR0mr(src
, dest
);
626 void load16(BaseIndex address
, RegisterID dest
)
628 RegisterID scr
= claimScratch();
630 move(address
.index
, scr
);
631 lshift32(TrustedImm32(address
.scale
), scr
);
634 add32(TrustedImm32(address
.offset
), scr
);
635 if (scr
== SH4Registers::r0
)
636 m_assembler
.movwR0mr(address
.base
, scr
);
638 add32(address
.base
, scr
);
646 void store32(RegisterID src
, ImplicitAddress address
)
648 RegisterID scr
= claimScratch();
649 store32(src
, address
.offset
, address
.base
, scr
);
653 void store32(RegisterID src
, int offset
, RegisterID base
, RegisterID scr
)
656 m_assembler
.movlRegMem(src
, base
);
660 if ((offset
>=0) && (offset
< 64)) {
661 m_assembler
.movlRegMem(src
, offset
>> 2, base
);
665 m_assembler
.loadConstant((offset
), scr
);
666 if (scr
== SH4Registers::r0
) {
667 m_assembler
.movlRegMemr0(src
, base
);
671 m_assembler
.addlRegReg(base
, scr
);
672 m_assembler
.movlRegMem(src
, scr
);
675 void store32(RegisterID src
, RegisterID offset
, RegisterID base
)
677 ASSERT(offset
== SH4Registers::r0
);
678 m_assembler
.movlRegMemr0(src
, base
);
681 void store32(RegisterID src
, RegisterID dst
)
683 m_assembler
.movlRegMem(src
, dst
);
686 void store32(TrustedImm32 imm
, ImplicitAddress address
)
688 RegisterID scr
= claimScratch();
689 RegisterID scr1
= claimScratch();
690 m_assembler
.loadConstant((imm
.m_value
), scr
);
691 store32(scr
, address
.offset
, address
.base
, scr1
);
693 releaseScratch(scr1
);
696 void store32(RegisterID src
, BaseIndex address
)
698 RegisterID scr
= claimScratch();
700 move(address
.index
, scr
);
701 lshift32(TrustedImm32(address
.scale
), scr
);
702 add32(address
.base
, scr
);
703 store32(src
, Address(scr
, address
.offset
));
708 void store32(TrustedImm32 imm
, void* address
)
710 RegisterID scr
= claimScratch();
711 RegisterID scr1
= claimScratch();
712 m_assembler
.loadConstant((imm
.m_value
), scr
);
713 m_assembler
.loadConstant(reinterpret_cast<uint32_t>(address
), scr1
);
714 m_assembler
.movlMemReg(scr
, scr1
);
716 releaseScratch(scr1
);
719 void store32(RegisterID src
, void* address
)
721 RegisterID scr
= claimScratch();
722 m_assembler
.loadConstant(reinterpret_cast<uint32_t>(address
), scr
);
723 m_assembler
.movlMemReg(src
, scr
);
727 DataLabel32
load32WithAddressOffsetPatch(Address address
, RegisterID dest
)
729 RegisterID scr
= claimScratch();
730 DataLabel32
label(this);
731 m_assembler
.loadConstantUnReusable(address
.offset
, scr
);
732 m_assembler
.addlRegReg(address
.base
, scr
);
733 m_assembler
.movlMemReg(scr
, dest
);
738 DataLabel32
load32WithAddressOffsetPatch(Address address
, RegisterID dest
)
740 RegisterID scr
= claimScratch();
741 DataLabelCompact
label(this);
742 m_assembler
.loadConstantUnReusable(address
.offset
, scr
);
743 m_assembler
.addlRegReg(address
.base
, scr
);
744 m_assembler
.movlMemReg(scr
, dest
);
749 DataLabel32
store32WithAddressOffsetPatch(RegisterID src
, Address address
)
751 RegisterID scr
= claimScratch();
752 DataLabel32
label(this);
753 m_assembler
.loadConstantUnReusable(address
.offset
, scr
);
754 m_assembler
.addlRegReg(address
.base
, scr
);
755 m_assembler
.movlRegMem(src
, scr
);
760 // Floating-point operations
762 bool supportsFloatingPoint() const { return true; }
763 bool supportsFloatingPointTruncate() const { return true; }
764 bool supportsFloatingPointSqrt() const { return true; }
766 void loadDouble(ImplicitAddress address
, FPRegisterID dest
)
768 RegisterID scr
= claimScratch();
770 m_assembler
.loadConstant(address
.offset
, scr
);
771 if (address
.base
== SH4Registers::r0
) {
772 m_assembler
.fmovsReadr0r(scr
, (FPRegisterID
)(dest
+ 1));
773 m_assembler
.addlImm8r(4, scr
);
774 m_assembler
.fmovsReadr0r(scr
, dest
);
779 m_assembler
.addlRegReg(address
.base
, scr
);
780 m_assembler
.fmovsReadrminc(scr
, (FPRegisterID
)(dest
+ 1));
781 m_assembler
.fmovsReadrm(scr
, dest
);
785 void loadDouble(const void* address
, FPRegisterID dest
)
787 RegisterID scr
= claimScratch();
788 m_assembler
.loadConstant(reinterpret_cast<uint32_t>(address
), scr
);
789 m_assembler
.fmovsReadrminc(scr
, (FPRegisterID
)(dest
+ 1));
790 m_assembler
.fmovsReadrm(scr
, dest
);
794 void storeDouble(FPRegisterID src
, ImplicitAddress address
)
796 RegisterID scr
= claimScratch();
797 m_assembler
.loadConstant(address
.offset
, scr
);
798 m_assembler
.addlRegReg(address
.base
, scr
);
799 m_assembler
.fmovsWriterm((FPRegisterID
)(src
+ 1), scr
);
800 m_assembler
.addlImm8r(4, scr
);
801 m_assembler
.fmovsWriterm(src
, scr
);
805 void addDouble(FPRegisterID src
, FPRegisterID dest
)
807 m_assembler
.daddRegReg(src
, dest
);
810 void addDouble(Address address
, FPRegisterID dest
)
812 loadDouble(address
, fscratch
);
813 addDouble(fscratch
, dest
);
816 void subDouble(FPRegisterID src
, FPRegisterID dest
)
818 m_assembler
.dsubRegReg(src
, dest
);
821 void subDouble(Address address
, FPRegisterID dest
)
823 loadDouble(address
, fscratch
);
824 subDouble(fscratch
, dest
);
827 void mulDouble(FPRegisterID src
, FPRegisterID dest
)
829 m_assembler
.dmulRegReg(src
, dest
);
832 void mulDouble(Address address
, FPRegisterID dest
)
834 loadDouble(address
, fscratch
);
835 mulDouble(fscratch
, dest
);
838 void divDouble(FPRegisterID src
, FPRegisterID dest
)
840 m_assembler
.ddivRegReg(src
, dest
);
843 void convertInt32ToDouble(RegisterID src
, FPRegisterID dest
)
845 m_assembler
.ldsrmfpul(src
);
846 m_assembler
.floatfpulDreg(dest
);
849 void convertInt32ToDouble(AbsoluteAddress src
, FPRegisterID dest
)
851 RegisterID scr
= claimScratch();
852 m_assembler
.loadConstant(reinterpret_cast<uint32_t>(src
.m_ptr
), scr
);
853 convertInt32ToDouble(scr
, dest
);
857 void convertInt32ToDouble(Address src
, FPRegisterID dest
)
859 RegisterID scr
= claimScratch();
861 convertInt32ToDouble(scr
, dest
);
865 void load32WithUnalignedHalfWords(BaseIndex address
, RegisterID dest
)
867 RegisterID scr
= claimScratch();
869 move(address
.index
, scr
);
870 lshift32(TrustedImm32(address
.scale
), scr
);
871 add32(address
.base
, scr
);
874 add32(TrustedImm32(address
.offset
), scr
);
876 RegisterID scr1
= claimScratch();
878 add32(TrustedImm32(2), scr
);
880 move(TrustedImm32(16), scr
);
881 m_assembler
.shllRegReg(dest
, scr
);
885 releaseScratch(scr1
);
888 Jump
branch32WithUnalignedHalfWords(RelationalCondition cond
, BaseIndex left
, TrustedImm32 right
)
890 RegisterID scr
= scratchReg3
;
891 load32WithUnalignedHalfWords(left
, scr
);
892 if (((cond
== Equal
) || (cond
== NotEqual
)) && !right
.m_value
)
893 m_assembler
.testlRegReg(scr
, scr
);
895 compare32(right
.m_value
, scr
, cond
);
897 if (cond
== NotEqual
)
898 return branchFalse();
902 Jump
branchDoubleNonZero(FPRegisterID reg
, FPRegisterID scratch
)
904 m_assembler
.movImm8(0, scratchReg3
);
905 convertInt32ToDouble(scratchReg3
, scratch
);
906 return branchDouble(DoubleNotEqual
, reg
, scratch
);
909 Jump
branchDoubleZeroOrNaN(FPRegisterID reg
, FPRegisterID scratch
)
911 m_assembler
.movImm8(0, scratchReg3
);
912 convertInt32ToDouble(scratchReg3
, scratch
);
913 return branchDouble(DoubleEqualOrUnordered
, reg
, scratch
);
916 Jump
branchDouble(DoubleCondition cond
, FPRegisterID left
, FPRegisterID right
)
918 if (cond
== DoubleEqual
) {
919 m_assembler
.dcmppeq(right
, left
);
923 if (cond
== DoubleNotEqual
) {
924 RegisterID scr
= claimScratch();
925 m_assembler
.loadConstant(0x7fbfffff, scratchReg3
);
926 m_assembler
.dcnvds(right
);
927 m_assembler
.stsfpulReg(scr
);
928 m_assembler
.cmplRegReg(scratchReg3
, scr
, SH4Condition(Equal
));
929 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 22, sizeof(uint32_t));
930 m_assembler
.branch(BT_OPCODE
, 8);
931 m_assembler
.dcnvds(left
);
932 m_assembler
.stsfpulReg(scr
);
933 m_assembler
.cmplRegReg(scratchReg3
, scr
, SH4Condition(Equal
));
934 m_assembler
.branch(BT_OPCODE
, 4);
935 m_assembler
.dcmppeq(right
, left
);
937 return branchFalse();
940 if (cond
== DoubleGreaterThan
) {
941 m_assembler
.dcmppgt(right
, left
);
945 if (cond
== DoubleGreaterThanOrEqual
) {
946 m_assembler
.dcmppgt(left
, right
);
947 return branchFalse();
950 if (cond
== DoubleLessThan
) {
951 m_assembler
.dcmppgt(left
, right
);
955 if (cond
== DoubleLessThanOrEqual
) {
956 m_assembler
.dcmppgt(right
, left
);
957 return branchFalse();
960 if (cond
== DoubleEqualOrUnordered
) {
961 RegisterID scr
= claimScratch();
962 m_assembler
.loadConstant(0x7fbfffff, scratchReg3
);
963 m_assembler
.dcnvds(right
);
964 m_assembler
.stsfpulReg(scr
);
965 m_assembler
.cmplRegReg(scratchReg3
, scr
, SH4Condition(Equal
));
966 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 22, sizeof(uint32_t));
967 m_assembler
.branch(BT_OPCODE
, 5);
968 m_assembler
.dcnvds(left
);
969 m_assembler
.stsfpulReg(scr
);
970 m_assembler
.cmplRegReg(scratchReg3
, scr
, SH4Condition(Equal
));
971 m_assembler
.branch(BT_OPCODE
, 1);
972 m_assembler
.dcmppeq(left
, right
);
977 if (cond
== DoubleGreaterThanOrUnordered
) {
978 RegisterID scr
= claimScratch();
979 m_assembler
.loadConstant(0x7fbfffff, scratchReg3
);
980 m_assembler
.dcnvds(right
);
981 m_assembler
.stsfpulReg(scr
);
982 m_assembler
.cmplRegReg(scratchReg3
, scr
, SH4Condition(Equal
));
983 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 22, sizeof(uint32_t));
984 m_assembler
.branch(BT_OPCODE
, 5);
985 m_assembler
.dcnvds(left
);
986 m_assembler
.stsfpulReg(scr
);
987 m_assembler
.cmplRegReg(scratchReg3
, scr
, SH4Condition(Equal
));
988 m_assembler
.branch(BT_OPCODE
, 1);
989 m_assembler
.dcmppgt(right
, left
);
994 if (cond
== DoubleGreaterThanOrEqualOrUnordered
) {
995 RegisterID scr
= claimScratch();
996 m_assembler
.loadConstant(0x7fbfffff, scratchReg3
);
997 m_assembler
.dcnvds(right
);
998 m_assembler
.stsfpulReg(scr
);
999 m_assembler
.cmplRegReg(scratchReg3
, scr
, SH4Condition(Equal
));
1000 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 22, sizeof(uint32_t));
1001 m_assembler
.branch(BT_OPCODE
, 5);
1002 m_assembler
.dcnvds(left
);
1003 m_assembler
.stsfpulReg(scr
);
1004 m_assembler
.cmplRegReg(scratchReg3
, scr
, SH4Condition(Equal
));
1005 m_assembler
.branch(BT_OPCODE
, 1);
1006 m_assembler
.dcmppgt(left
, right
);
1007 releaseScratch(scr
);
1008 return branchFalse();
1011 if (cond
== DoubleLessThanOrUnordered
) {
1012 RegisterID scr
= claimScratch();
1013 m_assembler
.loadConstant(0x7fbfffff, scratchReg3
);
1014 m_assembler
.dcnvds(right
);
1015 m_assembler
.stsfpulReg(scr
);
1016 m_assembler
.cmplRegReg(scratchReg3
, scr
, SH4Condition(Equal
));
1017 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 22, sizeof(uint32_t));
1018 m_assembler
.branch(BT_OPCODE
, 5);
1019 m_assembler
.dcnvds(left
);
1020 m_assembler
.stsfpulReg(scr
);
1021 m_assembler
.cmplRegReg(scratchReg3
, scr
, SH4Condition(Equal
));
1022 m_assembler
.branch(BT_OPCODE
, 1);
1023 m_assembler
.dcmppgt(left
, right
);
1024 releaseScratch(scr
);
1025 return branchTrue();
1028 if (cond
== DoubleLessThanOrEqualOrUnordered
) {
1029 RegisterID scr
= claimScratch();
1030 m_assembler
.loadConstant(0x7fbfffff, scratchReg3
);
1031 m_assembler
.dcnvds(right
);
1032 m_assembler
.stsfpulReg(scr
);
1033 m_assembler
.cmplRegReg(scratchReg3
, scr
, SH4Condition(Equal
));
1034 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 22, sizeof(uint32_t));
1035 m_assembler
.branch(BT_OPCODE
, 5);
1036 m_assembler
.dcnvds(left
);
1037 m_assembler
.stsfpulReg(scr
);
1038 m_assembler
.cmplRegReg(scratchReg3
, scr
, SH4Condition(Equal
));
1039 m_assembler
.branch(BT_OPCODE
, 1);
1040 m_assembler
.dcmppgt(right
, left
);
1041 releaseScratch(scr
);
1042 return branchFalse();
1045 ASSERT(cond
== DoubleNotEqualOrUnordered
);
1046 RegisterID scr
= claimScratch();
1047 m_assembler
.loadConstant(0x7fbfffff, scratchReg3
);
1048 m_assembler
.dcnvds(right
);
1049 m_assembler
.stsfpulReg(scr
);
1050 m_assembler
.cmplRegReg(scratchReg3
, scr
, SH4Condition(Equal
));
1051 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 22, sizeof(uint32_t));
1052 m_assembler
.branch(BT_OPCODE
, 5);
1053 m_assembler
.dcnvds(left
);
1054 m_assembler
.stsfpulReg(scr
);
1055 m_assembler
.cmplRegReg(scratchReg3
, scr
, SH4Condition(Equal
));
1056 m_assembler
.branch(BT_OPCODE
, 1);
1057 m_assembler
.dcmppeq(right
, left
);
1058 releaseScratch(scr
);
1059 return branchFalse();
1064 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 6, sizeof(uint32_t));
1065 Jump m_jump
= Jump(m_assembler
.je());
1066 m_assembler
.loadConstantUnReusable(0x0, scratchReg3
);
1074 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 6, sizeof(uint32_t));
1075 Jump m_jump
= Jump(m_assembler
.jne());
1076 m_assembler
.loadConstantUnReusable(0x0, scratchReg3
);
1082 Jump
branch32(RelationalCondition cond
, BaseIndex left
, TrustedImm32 right
)
1084 RegisterID scr
= claimScratch();
1085 move(left
.index
, scr
);
1086 lshift32(TrustedImm32(left
.scale
), scr
);
1087 add32(left
.base
, scr
);
1088 load32(scr
, left
.offset
, scr
);
1089 compare32(right
.m_value
, scr
, cond
);
1090 releaseScratch(scr
);
1092 if (cond
== NotEqual
)
1093 return branchFalse();
1094 return branchTrue();
1097 void sqrtDouble(FPRegisterID src
, FPRegisterID dest
)
1100 m_assembler
.dmovRegReg(src
, dest
);
1101 m_assembler
.dsqrt(dest
);
1104 Jump
branchTest8(ResultCondition cond
, Address address
, TrustedImm32 mask
= TrustedImm32(-1))
1106 RegisterID addressTempRegister
= claimScratch();
1107 load8(address
, addressTempRegister
);
1108 Jump jmp
= branchTest32(cond
, addressTempRegister
, mask
);
1109 releaseScratch(addressTempRegister
);
1113 void signExtend32ToPtr(RegisterID src
, RegisterID dest
)
1119 Jump
branch8(RelationalCondition cond
, Address left
, TrustedImm32 right
)
1121 RegisterID addressTempRegister
= claimScratch();
1122 load8(left
, addressTempRegister
);
1123 Jump jmp
= branch32(cond
, addressTempRegister
, right
);
1124 releaseScratch(addressTempRegister
);
1128 Jump
branchTruncateDoubleToInt32(FPRegisterID src
, RegisterID dest
)
1130 m_assembler
.ftrcdrmfpul(src
);
1131 m_assembler
.stsfpulReg(dest
);
1132 m_assembler
.loadConstant(0x7fffffff, scratchReg3
);
1133 m_assembler
.cmplRegReg(dest
, scratchReg3
, SH4Condition(Equal
));
1134 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 14, sizeof(uint32_t));
1135 m_assembler
.branch(BT_OPCODE
, 2);
1136 m_assembler
.addlImm8r(1, scratchReg3
);
1137 m_assembler
.cmplRegReg(dest
, scratchReg3
, SH4Condition(Equal
));
1138 return branchTrue();
1141 // Stack manipulation operations
1143 void pop(RegisterID dest
)
1145 m_assembler
.popReg(dest
);
1148 void push(RegisterID src
)
1150 m_assembler
.pushReg(src
);
1153 void push(Address address
)
1155 if (!address
.offset
) {
1160 if ((address
.offset
< 0) || (address
.offset
>= 64)) {
1161 RegisterID scr
= claimScratch();
1162 m_assembler
.loadConstant(address
.offset
, scr
);
1163 m_assembler
.addlRegReg(address
.base
, scr
);
1164 m_assembler
.movlMemReg(scr
, SH4Registers::sp
);
1165 m_assembler
.addlImm8r(-4, SH4Registers::sp
);
1166 releaseScratch(scr
);
1170 m_assembler
.movlMemReg(address
.offset
>> 2, address
.base
, SH4Registers::sp
);
1171 m_assembler
.addlImm8r(-4, SH4Registers::sp
);
1174 void push(TrustedImm32 imm
)
1176 RegisterID scr
= claimScratch();
1177 m_assembler
.loadConstant(imm
.m_value
, scr
);
1179 releaseScratch(scr
);
1182 // Register move operations
1184 void move(TrustedImm32 imm
, RegisterID dest
)
1186 m_assembler
.loadConstant(imm
.m_value
, dest
);
1189 DataLabelPtr
moveWithPatch(TrustedImmPtr initialValue
, RegisterID dest
)
1191 DataLabelPtr
dataLabel(this);
1192 m_assembler
.loadConstantUnReusable(reinterpret_cast<uint32_t>(initialValue
.m_value
), dest
, true);
1196 void move(RegisterID src
, RegisterID dest
)
1198 m_assembler
.movlRegReg(src
, dest
);
1201 void move(TrustedImmPtr imm
, RegisterID dest
)
1203 m_assembler
.loadConstant(imm
.asIntptr(), dest
);
// Zero-extends the low 16 bits of `src` into `dst`.
void extuw(RegisterID src, RegisterID dst)
{
    m_assembler.extuw(src, dst);
}
1211 void compare32(RelationalCondition cond
, RegisterID left
, RegisterID right
, RegisterID dest
)
1213 m_assembler
.cmplRegReg(right
, left
, SH4Condition(cond
));
1214 if (cond
!= NotEqual
) {
1215 m_assembler
.movt(dest
);
1219 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 4);
1220 m_assembler
.movImm8(0, dest
);
1221 m_assembler
.branch(BT_OPCODE
, 0);
1222 m_assembler
.movImm8(1, dest
);
1225 void compare32(RelationalCondition cond
, RegisterID left
, TrustedImm32 right
, RegisterID dest
)
1229 compare32(cond
, left
, dest
, dest
);
1233 RegisterID scr
= claimScratch();
1235 compare32(cond
, left
, scr
, dest
);
1236 releaseScratch(scr
);
1239 void test8(ResultCondition cond
, Address address
, TrustedImm32 mask
, RegisterID dest
)
1241 ASSERT((cond
== Zero
) || (cond
== NonZero
));
1243 load8(address
, dest
);
1244 if (mask
.m_value
== -1)
1245 compare32(0, dest
, static_cast<RelationalCondition
>(cond
));
1247 testlImm(mask
.m_value
, dest
);
1248 if (cond
!= NonZero
) {
1249 m_assembler
.movt(dest
);
1253 m_assembler
.ensureSpace(m_assembler
.maxInstructionSize
+ 4);
1254 m_assembler
.movImm8(0, dest
);
1255 m_assembler
.branch(BT_OPCODE
, 0);
1256 m_assembler
.movImm8(1, dest
);
// Loads the pointer at `address` into the PR (procedure return) register.
void loadPtrLinkReg(ImplicitAddress address)
{
    RegisterID scr = claimScratch();
    load32(address, scr);
    m_assembler.ldspr(scr); // LDS scr, PR
    releaseScratch(scr);
}
// Jump taken when `left cond right` holds.
Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
{
    m_assembler.cmplRegReg(right, left, SH4Condition(cond));

    // The compare sets T when the condition holds, except NotEqual, which is
    // emitted as an Equal compare and inverted by branching on T == 0.
    if (cond == NotEqual)
        return branchFalse();
    return branchTrue();
}
1280 Jump
branch32(RelationalCondition cond
, RegisterID left
, TrustedImm32 right
)
1282 if (((cond
== Equal
) || (cond
== NotEqual
)) && !right
.m_value
)
1283 m_assembler
.testlRegReg(left
, left
);
1285 compare32(right
.m_value
, left
, cond
);
1287 if (cond
== NotEqual
)
1288 return branchFalse();
1289 return branchTrue();
// Jump taken when `left cond [right.base + right.offset]` holds.
Jump branch32(RelationalCondition cond, RegisterID left, Address right)
{
    // The compare32 helper loads the memory operand and sets T.
    compare32(right.offset, right.base, left, cond);
    if (cond == NotEqual)
        return branchFalse();
    return branchTrue();
}
// Jump taken when `[left.base + left.offset] cond right` holds.
Jump branch32(RelationalCondition cond, Address left, RegisterID right)
{
    compare32(right, left.offset, left.base, cond);
    if (cond == NotEqual)
        return branchFalse();
    return branchTrue();
}
// Jump taken when `[left.base + left.offset] cond right` (immediate) holds.
Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
{
    compare32(right.m_value, left.offset, left.base, cond);
    if (cond == NotEqual)
        return branchFalse();
    return branchTrue();
}
1316 Jump
branch32(RelationalCondition cond
, AbsoluteAddress left
, RegisterID right
)
1318 RegisterID scr
= claimScratch();
1320 move(TrustedImm32(reinterpret_cast<uint32_t>(left
.m_ptr
)), scr
);
1321 m_assembler
.cmplRegReg(right
, scr
, SH4Condition(cond
));
1322 releaseScratch(scr
);
1324 if (cond
== NotEqual
)
1325 return branchFalse();
1326 return branchTrue();
// Jump taken when `*left.m_ptr cond right` (immediate) holds.
Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
{
    RegisterID addressTempRegister = claimScratch();

    m_assembler.loadConstant(reinterpret_cast<uint32_t>(left.m_ptr), addressTempRegister);
    // Load the 32-bit value stored at the absolute address.
    m_assembler.movlMemReg(addressTempRegister, addressTempRegister);
    compare32(right.m_value, addressTempRegister, cond);
    releaseScratch(addressTempRegister);

    if (cond == NotEqual)
        return branchFalse();
    return branchTrue();
}
1343 Jump
branch16(RelationalCondition cond
, BaseIndex left
, RegisterID right
)
1345 RegisterID scr
= claimScratch();
1347 move(left
.index
, scr
);
1348 lshift32(TrustedImm32(left
.scale
), scr
);
1351 add32(TrustedImm32(left
.offset
), scr
);
1352 add32(left
.base
, scr
);
1355 releaseScratch(scr
);
1357 return branch32(cond
, scr
, right
);
1360 Jump
branch16(RelationalCondition cond
, BaseIndex left
, TrustedImm32 right
)
1362 RegisterID scr
= claimScratch();
1364 move(left
.index
, scr
);
1365 lshift32(TrustedImm32(left
.scale
), scr
);
1368 add32(TrustedImm32(left
.offset
), scr
);
1369 add32(left
.base
, scr
);
1372 RegisterID scr1
= claimScratch();
1373 m_assembler
.loadConstant(right
.m_value
, scr1
);
1374 releaseScratch(scr
);
1375 releaseScratch(scr1
);
1377 return branch32(cond
, scr
, scr1
);
// Jump taken when `reg & mask` is zero (cond == Zero) or non-zero (NonZero).
Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
{
    ASSERT((cond == Zero) || (cond == NonZero));

    m_assembler.testlRegReg(reg, mask); // TST sets T when (reg & mask) == 0

    // NOTE(review): relies on NonZero and NotEqual sharing an enum value.
    if (cond == NotEqual)
        return branchFalse();
    return branchTrue();
}
1391 Jump
branchTest32(ResultCondition cond
, RegisterID reg
, TrustedImm32 mask
= TrustedImm32(-1))
1393 ASSERT((cond
== Zero
) || (cond
== NonZero
));
1395 if (mask
.m_value
== -1)
1396 m_assembler
.testlRegReg(reg
, reg
);
1398 testlImm(mask
.m_value
, reg
);
1400 if (cond
== NotEqual
)
1401 return branchFalse();
1402 return branchTrue();
1405 Jump
branchTest32(ResultCondition cond
, Address address
, TrustedImm32 mask
= TrustedImm32(-1))
1407 ASSERT((cond
== Zero
) || (cond
== NonZero
));
1409 if (mask
.m_value
== -1)
1410 compare32(0, address
.offset
, address
.base
, static_cast<RelationalCondition
>(cond
));
1412 testImm(mask
.m_value
, address
.offset
, address
.base
);
1414 if (cond
== NotEqual
)
1415 return branchFalse();
1416 return branchTrue();
1419 Jump
branchTest32(ResultCondition cond
, BaseIndex address
, TrustedImm32 mask
= TrustedImm32(-1))
1421 RegisterID scr
= claimScratch();
1423 move(address
.index
, scr
);
1424 lshift32(TrustedImm32(address
.scale
), scr
);
1425 add32(address
.base
, scr
);
1426 load32(scr
, address
.offset
, scr
);
1428 if (mask
.m_value
== -1)
1429 m_assembler
.testlRegReg(scr
, scr
);
1431 testlImm(mask
.m_value
, scr
);
1433 releaseScratch(scr
);
1435 if (cond
== NotEqual
)
1436 return branchFalse();
1437 return branchTrue();
1442 return Jump(m_assembler
.jmp());
// Indirect jump through `target`.
void jump(RegisterID target)
{
    m_assembler.jmpReg(target);
}
1450 void jump(Address address
)
1452 RegisterID scr
= claimScratch();
1454 if ((address
.offset
< 0) || (address
.offset
>= 64)) {
1455 m_assembler
.loadConstant(address
.offset
, scr
);
1456 m_assembler
.addlRegReg(address
.base
, scr
);
1457 m_assembler
.movlMemReg(scr
, scr
);
1458 } else if (address
.offset
)
1459 m_assembler
.movlMemReg(address
.offset
>> 2, address
.base
, scr
);
1461 m_assembler
.movlMemReg(address
.base
, scr
);
1462 m_assembler
.jmpReg(scr
);
1464 releaseScratch(scr
);
1467 // Arithmetic control flow operations
// dest += src; returns a Jump taken when `cond` holds on the result.
Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
{
    ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));

    if (cond == Overflow) {
        m_assembler.addvlRegReg(src, dest); // ADDV sets T on signed overflow
        return branchTrue();
    }

    if (cond == Signed) {
        m_assembler.addlRegReg(src, dest);
        // Check if dest is negative
        m_assembler.cmppz(dest); // CMP/PZ: T set when dest >= 0
        return branchFalse();    // branch when negative (T clear)
    }

    m_assembler.addlRegReg(src, dest);
    compare32(0, dest, Equal); // T set when result == 0

    // NOTE(review): relies on NonZero sharing NotEqual's enum value.
    if (cond == NotEqual)
        return branchFalse();
    return branchTrue();
}
// dest += imm; returns a Jump taken when `cond` holds on the result.
// Uses the reserved scratchReg3 to materialize the immediate.
Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
{
    ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));

    move(imm, scratchReg3);
    return branchAdd32(cond, scratchReg3, dest);
}
// dest *= src; returns a Jump taken when `cond` holds on the result.
Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
{
    ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));

    if (cond == Overflow) {
        // 64-bit product in MACH:MACL; overflow iff the high word differs
        // from the sign-extension of the low word.
        RegisterID scr1 = claimScratch();
        RegisterID scr = claimScratch();
        m_assembler.dmullRegReg(src, dest);
        m_assembler.stsmacl(dest);          // dest = low 32 bits
        m_assembler.movImm8(-31, scr);
        m_assembler.movlRegReg(dest, scr1);
        m_assembler.shaRegReg(scr1, scr);   // scr1 = dest >> 31 (arithmetic)
        m_assembler.stsmach(scr);           // scr = high 32 bits
        m_assembler.cmplRegReg(scr, scr1, SH4Condition(Equal));
        releaseScratch(scr1);
        releaseScratch(scr);
        return branchFalse(); // branch when high word != sign extension
    }

    m_assembler.imullRegReg(src, dest);
    m_assembler.stsmacl(dest);
    if (cond == Signed) {
        // Check if dest is negative
        m_assembler.cmppz(dest);
        return branchFalse();
    }

    compare32(0, dest, static_cast<RelationalCondition>(cond));

    if (cond == NotEqual)
        return branchFalse();
    return branchTrue();
}
1535 Jump
branchMul32(ResultCondition cond
, TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
1537 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
1539 move(imm
, scratchReg3
);
1543 return branchMul32(cond
, scratchReg3
, dest
);
1546 Jump
branchSub32(ResultCondition cond
, RegisterID src
, RegisterID dest
)
1548 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
1550 if (cond
== Overflow
) {
1551 m_assembler
.subvlRegReg(src
, dest
);
1552 return branchTrue();
1555 if (cond
== Signed
) {
1556 // Check if dest is negative
1557 m_assembler
.sublRegReg(src
, dest
);
1558 compare32(0, dest
, LessThan
);
1559 return branchTrue();
1563 compare32(0, dest
, static_cast<RelationalCondition
>(cond
));
1565 if (cond
== NotEqual
)
1566 return branchFalse();
1567 return branchTrue();
// dest -= imm; returns a Jump taken when `cond` holds on the result.
// Uses the reserved scratchReg3 to materialize the immediate.
Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
{
    ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));

    move(imm, scratchReg3);
    return branchSub32(cond, scratchReg3, dest);
}
1578 Jump
branchOr32(ResultCondition cond
, RegisterID src
, RegisterID dest
)
1580 ASSERT((cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
1582 if (cond
== Signed
) {
1584 compare32(0, dest
, static_cast<RelationalCondition
>(LessThan
));
1585 return branchTrue();
1589 compare32(0, dest
, static_cast<RelationalCondition
>(cond
));
1591 if (cond
== NotEqual
)
1592 return branchFalse();
1593 return branchTrue();
1596 void branchConvertDoubleToInt32(FPRegisterID src
, RegisterID dest
, JumpList
& failureCases
, FPRegisterID fpTemp
)
1598 m_assembler
.ftrcdrmfpul(src
);
1599 m_assembler
.stsfpulReg(dest
);
1600 convertInt32ToDouble(dest
, fscratch
);
1601 failureCases
.append(branchDouble(DoubleNotEqualOrUnordered
, fscratch
, src
));
1603 if (dest
== SH4Registers::r0
)
1604 m_assembler
.cmpEqImmR0(0, dest
);
1606 m_assembler
.movImm8(0, scratchReg3
);
1607 m_assembler
.cmplRegReg(scratchReg3
, dest
, SH4Condition(Equal
));
1609 failureCases
.append(branchTrue());
// dst = -dst (two's complement negate).
void neg32(RegisterID dst)
{
    m_assembler.neg(dst, dst);
}
// dst = ~dst (bitwise complement).
void not32(RegisterID dst)
{
    m_assembler.notlReg(dst, dst);
}
// Logical (unsigned) right shift: dest >>= shiftamount.
// SHLD shifts right when its operand is negative, so the amount is negated;
// a shift amount of exactly 32 is special-cased via the conditional skip.
void urshift32(RegisterID shiftamount, RegisterID dest)
{
    compare32(32, shiftamount, Equal);
    // Keep the short branch and its target contiguous across pool flushes.
    m_assembler.ensureSpace(m_assembler.maxInstructionSize + 4);
    m_assembler.branch(BT_OPCODE, 1); // skip the negate when amount == 32
    m_assembler.neg(shiftamount, shiftamount);
    m_assembler.shllRegReg(dest, shiftamount);
}
1631 void urshift32(TrustedImm32 imm
, RegisterID dest
)
1633 RegisterID scr
= claimScratch();
1634 m_assembler
.loadConstant(-(imm
.m_value
), scr
);
1635 m_assembler
.shaRegReg(dest
, scr
);
1636 releaseScratch(scr
);
1641 return Call(m_assembler
.call(), Call::Linkable
);
1646 return Call(m_assembler
.call(), Call::LinkableNear
);
// Indirect call through `target`; not linkable afterwards (Call::None).
Call call(RegisterID target)
{
    return Call(m_assembler.call(target), Call::None);
}
// Loads a function pointer from `address` into `target` and calls it.
void call(Address address, RegisterID target)
{
    load32(address.base, address.offset, target);
    // Keep the JSR emission contiguous across constant-pool flushes.
    m_assembler.ensureSpace(m_assembler.maxInstructionSize + 2);
    // NOTE(review): JSR has a branch delay slot on SH4 — presumably
    // SH4Assembler::branch fills it; confirm, since no explicit nop follows.
    m_assembler.branch(JSR_OPCODE, target);
}
// Compares `left` with a patchable pointer constant (initially
// `initialRightValue`); `dataLabel` receives the label used to repatch it.
Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
{
    RegisterID dataTempRegister = claimScratch();

    dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
    m_assembler.cmplRegReg(dataTempRegister, left, SH4Condition(cond));
    releaseScratch(dataTempRegister);

    if (cond == NotEqual)
        return branchFalse();
    return branchTrue();
}
// Compares the pointer loaded from `left` with a patchable pointer constant
// (initially `initialRightValue`); `dataLabel` receives the patch label.
Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
{
    RegisterID scr = claimScratch();

    // Compute left.base + left.offset, then load the pointer stored there.
    m_assembler.loadConstant(left.offset, scr);
    m_assembler.addlRegReg(left.base, scr);
    m_assembler.movlMemReg(scr, scr);
    RegisterID scr1 = claimScratch();
    dataLabel = moveWithPatch(initialRightValue, scr1);
    m_assembler.cmplRegReg(scr1, scr, SH4Condition(cond));
    releaseScratch(scr);
    releaseScratch(scr1);

    if (cond == NotEqual)
        return branchFalse();
    return branchTrue();
}
1705 DataLabelPtr
storePtrWithPatch(TrustedImmPtr initialValue
, ImplicitAddress address
)
1707 RegisterID scr
= claimScratch();
1708 DataLabelPtr label
= moveWithPatch(initialValue
, scr
);
1709 store32(scr
, address
);
1710 releaseScratch(scr
);
// Convenience overload: patchable store of an initially-null pointer.
DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(TrustedImmPtr(0), address); }
// Current size of the assembler's pending constant pool, in bytes.
int sizeOfConstantPool()
{
    return m_assembler.sizeOfConstantPool();
}
// Emits a tail call: an indirect jump through a patchable (initially 0)
// constant, wrapped as a Call so the link step can fill in the target.
Call tailRecursiveCall()
{
    RegisterID scr = claimScratch();

    m_assembler.loadConstantUnReusable(0x0, scr, true); // patchable target slot
    Jump m_jump = Jump(m_assembler.jmp(scr));
    releaseScratch(scr);

    return Call::fromTailJump(m_jump);
}
1732 Call
makeTailRecursiveCall(Jump oldJump
)
1735 return tailRecursiveCall();
// Maps a MacroAssembler RelationalCondition onto the assembler's condition
// encoding (the enums share values by construction).
SH4Assembler::Condition SH4Condition(RelationalCondition cond)
{
    return static_cast<SH4Assembler::Condition>(cond);
}
// Maps a MacroAssembler ResultCondition onto the assembler's condition
// encoding (the enums share values by construction).
SH4Assembler::Condition SH4Condition(ResultCondition cond)
{
    return static_cast<SH4Assembler::Condition>(cond);
}
// LinkBuffer/RepatchBuffer need access to the private link/repatch helpers.
friend class LinkBuffer;
friend class RepatchBuffer;

// Link-time and repatch helpers, implemented out of line.
static void linkCall(void*, Call, FunctionPtr);
static void repatchCall(CodeLocationCall, CodeLocationLabel);
static void repatchCall(CodeLocationCall, FunctionPtr);
1764 #endif // ENABLE(ASSEMBLER)
1766 #endif // MacroAssemblerSH4_h