#ifndef MacroAssemblerX86Common_h
#define MacroAssemblerX86Common_h
-#include <wtf/Platform.h>
-
#if ENABLE(ASSEMBLER)
#include "X86Assembler.h"
class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
public:
+#if CPU(X86_64)
+ static const X86Registers::RegisterID scratchRegister = X86Registers::r11;
+#endif
+
+protected:
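+ // These bits are OR'ed into the DoubleCondition values below and masked back out by branchDouble():
+ // the Invert bit swaps the ucomisd operand order, and the Special bit marks the (un)ordered-equality
+ // cases that need extra parity-flag handling.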
+ static const int DoubleConditionBitInvert = 0x10;
+ static const int DoubleConditionBitSpecial = 0x20;
+ static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;
- enum Condition {
+public:
+ typedef X86Assembler::XMMRegisterID XMMRegisterID;
+
+ static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
+ {
+ return value >= -128 && value <= 127;
+ }
+
+ enum RelationalCondition {
Equal = X86Assembler::ConditionE,
NotEqual = X86Assembler::ConditionNE,
Above = X86Assembler::ConditionA,
GreaterThan = X86Assembler::ConditionG,
GreaterThanOrEqual = X86Assembler::ConditionGE,
LessThan = X86Assembler::ConditionL,
- LessThanOrEqual = X86Assembler::ConditionLE,
+ LessThanOrEqual = X86Assembler::ConditionLE
+ };
+
+ enum ResultCondition {
Overflow = X86Assembler::ConditionO,
Signed = X86Assembler::ConditionS,
+ PositiveOrZero = X86Assembler::ConditionNS,
Zero = X86Assembler::ConditionE,
NonZero = X86Assembler::ConditionNE
};
enum DoubleCondition {
- DoubleEqual = X86Assembler::ConditionE,
+ // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+ DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
DoubleNotEqual = X86Assembler::ConditionNE,
DoubleGreaterThan = X86Assembler::ConditionA,
DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
- DoubleLessThan = X86Assembler::ConditionB,
- DoubleLessThanOrEqual = X86Assembler::ConditionBE,
+ DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
+ DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
+ // If either operand is NaN, these conditions always evaluate to true.
+ DoubleEqualOrUnordered = X86Assembler::ConditionE,
+ DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
+ DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
+ DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
+ DoubleLessThanOrUnordered = X86Assembler::ConditionB,
+ DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE
};
+ COMPILE_ASSERT(
+ !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
+ DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);
- static const RegisterID stackPointerRegister = X86::esp;
+ static const RegisterID stackPointerRegister = X86Registers::esp;
+ static const RegisterID framePointerRegister = X86Registers::ebp;
+
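+ // Constant blinding is supported; immediates of 0x00ffffff and above are considered worth blinding.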
+ static bool canBlind() { return true; }
+ static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
+ static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }
// Integer arithmetic operations:
//
// Operations are typically two operand - operation(source, srcDst)
- // For many operations the source may be an Imm32, the srcDst operand
+ // For many operations the source may be a TrustedImm32, the srcDst operand
// may often be a memory location (explicitly described using an Address
// object).
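+ // E.g., given some RegisterID base, add32(TrustedImm32(1), Address(base, 4)) adds one to the 32-bit value at [base + 4].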
m_assembler.addl_rr(src, dest);
}
- void add32(Imm32 imm, Address address)
+ void add32(TrustedImm32 imm, Address address)
{
m_assembler.addl_im(imm.m_value, address.offset, address.base);
}
- void add32(Imm32 imm, RegisterID dest)
+ void add32(TrustedImm32 imm, RegisterID dest)
{
- m_assembler.addl_ir(imm.m_value, dest);
+ if (imm.m_value == 1)
+ m_assembler.inc_r(dest);
+ else
+ m_assembler.addl_ir(imm.m_value, dest);
}
void add32(Address src, RegisterID dest)
{
m_assembler.addl_rm(src, dest.offset, dest.base);
}
+
+ void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
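+ // leal computes dest = src + imm in a single instruction, without modifying src or the flags.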
+ m_assembler.leal_mr(imm.m_value, src, dest);
+ }
void and32(RegisterID src, RegisterID dest)
{
m_assembler.andl_rr(src, dest);
}
- void and32(Imm32 imm, RegisterID dest)
+ void and32(TrustedImm32 imm, RegisterID dest)
{
m_assembler.andl_ir(imm.m_value, dest);
}
m_assembler.andl_mr(src.offset, src.base, dest);
}
- void and32(Imm32 imm, Address address)
+ void and32(TrustedImm32 imm, Address address)
{
m_assembler.andl_im(imm.m_value, address.offset, address.base);
}
- void lshift32(Imm32 imm, RegisterID dest)
+ void and32(RegisterID op1, RegisterID op2, RegisterID dest)
{
- m_assembler.shll_i8r(imm.m_value, dest);
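+ // op & op == op, so when both operands are the same register a (zero-extending) move suffices.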
+ if (op1 == op2)
+ zeroExtend32ToPtr(op1, dest);
+ else if (op1 == dest)
+ and32(op2, dest);
+ else {
+ move(op2, dest);
+ and32(op1, dest);
+ }
}
-
+
+ void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(src, dest);
+ and32(imm, dest);
+ }
+
void lshift32(RegisterID shift_amount, RegisterID dest)
{
- // On x86 we can only shift by ecx; if asked to shift by another register we'll
- // need rejig the shift amount into ecx first, and restore the registers afterwards.
- if (shift_amount != X86::ecx) {
- swap(shift_amount, X86::ecx);
-
- // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
- if (dest == shift_amount)
- m_assembler.shll_CLr(X86::ecx);
- // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
- else if (dest == X86::ecx)
- m_assembler.shll_CLr(shift_amount);
- // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
- else
- m_assembler.shll_CLr(dest);
-
- swap(shift_amount, X86::ecx);
- } else
+ ASSERT(shift_amount != dest);
+
+ if (shift_amount == X86Registers::ecx)
m_assembler.shll_CLr(dest);
+ else {
+ // On x86 we can only shift by ecx; if asked to shift by another register we'll
+ // need to rejig the shift amount into ecx first, and restore the registers afterwards.
+ // If dest is ecx, then shift the swapped register!
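+ // E.g. shifting %ebx left by %eax becomes: xchgl %eax, %ecx; shll %cl, %ebx; xchgl %eax, %ecx.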
+ swap(shift_amount, X86Registers::ecx);
+ m_assembler.shll_CLr(dest == X86Registers::ecx ? shift_amount : dest);
+ swap(shift_amount, X86Registers::ecx);
+ }
+ }
+
+ void lshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
+ {
+ ASSERT(shift_amount != dest);
+
+ if (src != dest)
+ move(src, dest);
+ lshift32(shift_amount, dest);
+ }
+
+ void lshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.shll_i8r(imm.m_value, dest);
+ }
+
+ void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ lshift32(imm, dest);
}
void mul32(RegisterID src, RegisterID dest)
m_assembler.imull_mr(src.offset, src.base, dest);
}
- void mul32(Imm32 imm, RegisterID src, RegisterID dest)
+ void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
{
m_assembler.imull_i32r(src, imm.m_value, dest);
}
m_assembler.negl_m(srcDest.offset, srcDest.base);
}
- void not32(RegisterID srcDest)
- {
- m_assembler.notl_r(srcDest);
- }
-
- void not32(Address srcDest)
- {
- m_assembler.notl_m(srcDest.offset, srcDest.base);
- }
-
void or32(RegisterID src, RegisterID dest)
{
m_assembler.orl_rr(src, dest);
}
- void or32(Imm32 imm, RegisterID dest)
+ void or32(TrustedImm32 imm, RegisterID dest)
{
m_assembler.orl_ir(imm.m_value, dest);
}
m_assembler.orl_mr(src.offset, src.base, dest);
}
- void or32(Imm32 imm, Address address)
+ void or32(TrustedImm32 imm, Address address)
{
m_assembler.orl_im(imm.m_value, address.offset, address.base);
}
+ void or32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ if (op1 == op2)
+ zeroExtend32ToPtr(op1, dest);
+ else if (op1 == dest)
+ or32(op2, dest);
+ else {
+ move(op2, dest);
+ or32(op1, dest);
+ }
+ }
+
+ void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(src, dest);
+ or32(imm, dest);
+ }
+
void rshift32(RegisterID shift_amount, RegisterID dest)
{
- // On x86 we can only shift by ecx; if asked to shift by another register we'll
- // need rejig the shift amount into ecx first, and restore the registers afterwards.
- if (shift_amount != X86::ecx) {
- swap(shift_amount, X86::ecx);
-
- // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
- if (dest == shift_amount)
- m_assembler.sarl_CLr(X86::ecx);
- // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
- else if (dest == X86::ecx)
- m_assembler.sarl_CLr(shift_amount);
- // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
- else
- m_assembler.sarl_CLr(dest);
-
- swap(shift_amount, X86::ecx);
- } else
+ ASSERT(shift_amount != dest);
+
+ if (shift_amount == X86Registers::ecx)
m_assembler.sarl_CLr(dest);
+ else {
+ // On x86 we can only shift by ecx; if asked to shift by another register we'll
+ // need to rejig the shift amount into ecx first, and restore the registers afterwards.
+ // If dest is ecx, then shift the swapped register!
+ swap(shift_amount, X86Registers::ecx);
+ m_assembler.sarl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
+ swap(shift_amount, X86Registers::ecx);
+ }
}
- void rshift32(Imm32 imm, RegisterID dest)
+ void rshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
+ {
+ ASSERT(shift_amount != dest);
+
+ if (src != dest)
+ move(src, dest);
+ rshift32(shift_amount, dest);
+ }
+
+ void rshift32(TrustedImm32 imm, RegisterID dest)
{
m_assembler.sarl_i8r(imm.m_value, dest);
}
+
+ void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ rshift32(imm, dest);
+ }
+
+ void urshift32(RegisterID shift_amount, RegisterID dest)
+ {
+ ASSERT(shift_amount != dest);
+
+ if (shift_amount == X86Registers::ecx)
+ m_assembler.shrl_CLr(dest);
+ else {
+ // On x86 we can only shift by ecx; if asked to shift by another register we'll
+ // need to rejig the shift amount into ecx first, and restore the registers afterwards.
+ // If dest is ecx, then shift the swapped register!
+ swap(shift_amount, X86Registers::ecx);
+ m_assembler.shrl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
+ swap(shift_amount, X86Registers::ecx);
+ }
+ }
+
+ void urshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
+ {
+ ASSERT(shift_amount != dest);
+ if (src != dest)
+ move(src, dest);
+ urshift32(shift_amount, dest);
+ }
+
+ void urshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.shrl_i8r(imm.m_value, dest);
+ }
+
+ void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ urshift32(imm, dest);
+ }
+
void sub32(RegisterID src, RegisterID dest)
{
m_assembler.subl_rr(src, dest);
}
- void sub32(Imm32 imm, RegisterID dest)
+ void sub32(TrustedImm32 imm, RegisterID dest)
{
- m_assembler.subl_ir(imm.m_value, dest);
+ if (imm.m_value == 1)
+ m_assembler.dec_r(dest);
+ else
+ m_assembler.subl_ir(imm.m_value, dest);
}
- void sub32(Imm32 imm, Address address)
+ void sub32(TrustedImm32 imm, Address address)
{
m_assembler.subl_im(imm.m_value, address.offset, address.base);
}
m_assembler.subl_rm(src, dest.offset, dest.base);
}
-
void xor32(RegisterID src, RegisterID dest)
{
m_assembler.xorl_rr(src, dest);
}
- void xor32(Imm32 imm, Address dest)
+ void xor32(TrustedImm32 imm, Address dest)
{
- m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
+ if (imm.m_value == -1)
+ m_assembler.notl_m(dest.offset, dest.base);
+ else
+ m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
}
- void xor32(Imm32 imm, RegisterID dest)
+ void xor32(TrustedImm32 imm, RegisterID dest)
{
+ if (imm.m_value == -1)
+ m_assembler.notl_r(dest);
+ else
m_assembler.xorl_ir(imm.m_value, dest);
}
m_assembler.xorl_mr(src.offset, src.base, dest);
}
+ void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
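+ // op ^ op == 0, so when both operands are the same register just materialize zero.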
+ if (op1 == op2)
+ move(TrustedImm32(0), dest);
+ else if (op1 == dest)
+ xor32(op2, dest);
+ else {
+ move(op2, dest);
+ xor32(op1, dest);
+ }
+ }
+
+ void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(src, dest);
+ xor32(imm, dest);
+ }
+
+ void sqrtDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ m_assembler.sqrtsd_rr(src, dst);
+ }
+
+ void absDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ ASSERT(src != dst);
+ static const double negativeZeroConstant = -0.0;
+ loadDouble(TrustedImmPtr(&negativeZeroConstant), dst);
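+ // -0.0 has only the sign bit set; andnpd clears that bit in src, leaving the absolute value in dst.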
+ m_assembler.andnpd_rr(src, dst);
+ }
+
+ void negateDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ ASSERT(src != dst);
+ static const double negativeZeroConstant = -0.0;
+ loadDouble(TrustedImmPtr(&negativeZeroConstant), dst);
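+ // xorpd with the sign-bit mask flips the sign of src, leaving the negated value in dst.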
+ m_assembler.xorpd_rr(src, dst);
+ }
+
// Memory access operations:
//
// Loads are of the form load(address, destination) and stores of the form
- // store(source, address). The source for a store may be an Imm32. Address
+ // store(source, address). The source for a store may be a TrustedImm32. Address
// operand objects to loads and store will be implicitly constructed if a
// register is passed.
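+ // E.g., for some RegisterID base, store32(TrustedImm32(0), Address(base, 8)) writes zero to the 32-bit word at [base + 8].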
m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
}
+ void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+ {
+ load32(address, dest);
+ }
+
+ void load16Unaligned(BaseIndex address, RegisterID dest)
+ {
+ load16(address, dest);
+ }
+
DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
{
+ padBeforePatch();
m_assembler.movl_mr_disp32(address.offset, address.base, dest);
return DataLabel32(this);
}
+
+ DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ padBeforePatch();
+ m_assembler.movl_mr_disp8(address.offset, address.base, dest);
+ return DataLabelCompact(this);
+ }
+
+ static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
+ {
+ ASSERT(isCompactPtrAlignedAddressOffset(value));
+ AssemblerType_T::repatchCompact(dataLabelCompact.dataLocation(), value);
+ }
+
+ DataLabelCompact loadCompactWithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ padBeforePatch();
+ m_assembler.movl_mr_disp8(address.offset, address.base, dest);
+ return DataLabelCompact(this);
+ }
+ void load8(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.movzbl_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
+
+ void load8(ImplicitAddress address, RegisterID dest)
+ {
+ m_assembler.movzbl_mr(address.offset, address.base, dest);
+ }
+
+ void load8Signed(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.movsbl_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
+
+ void load8Signed(ImplicitAddress address, RegisterID dest)
+ {
+ m_assembler.movsbl_mr(address.offset, address.base, dest);
+ }
+
void load16(BaseIndex address, RegisterID dest)
{
m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
}
+
+ void load16(Address address, RegisterID dest)
+ {
+ m_assembler.movzwl_mr(address.offset, address.base, dest);
+ }
+
+ void load16Signed(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.movswl_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
+
+ void load16Signed(Address address, RegisterID dest)
+ {
+ m_assembler.movswl_mr(address.offset, address.base, dest);
+ }
DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
{
+ padBeforePatch();
m_assembler.movl_rm_disp32(src, address.offset, address.base);
return DataLabel32(this);
}
m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
}
- void store32(Imm32 imm, ImplicitAddress address)
+ void store32(TrustedImm32 imm, ImplicitAddress address)
{
m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
}
+
+ void store32(TrustedImm32 imm, BaseIndex address)
+ {
+ m_assembler.movl_i32m(imm.m_value, address.offset, address.base, address.index, address.scale);
+ }
+
+ void store8(TrustedImm32 imm, Address address)
+ {
+ ASSERT(-128 <= imm.m_value && imm.m_value < 128);
+ m_assembler.movb_i8m(imm.m_value, address.offset, address.base);
+ }
+
+ void store8(TrustedImm32 imm, BaseIndex address)
+ {
+ ASSERT(-128 <= imm.m_value && imm.m_value < 128);
+ m_assembler.movb_i8m(imm.m_value, address.offset, address.base, address.index, address.scale);
+ }
+
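+ // Returns a register not referenced by the address operand, so it can be borrowed (via swap) for the byte/halfword stores below on 32-bit x86.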
+ static ALWAYS_INLINE RegisterID getUnusedRegister(BaseIndex address)
+ {
+ if (address.base != X86Registers::eax && address.index != X86Registers::eax)
+ return X86Registers::eax;
+
+ if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
+ return X86Registers::ebx;
+
+ ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
+ return X86Registers::ecx;
+ }
+
+ static ALWAYS_INLINE RegisterID getUnusedRegister(Address address)
+ {
+ if (address.base != X86Registers::eax)
+ return X86Registers::eax;
+
+ ASSERT(address.base != X86Registers::edx);
+ return X86Registers::edx;
+ }
+
+ void store8(RegisterID src, BaseIndex address)
+ {
+#if CPU(X86)
+ // On 32-bit x86 we can only store from the first 4 registers;
+ // esp..edi are mapped to the 'h' registers!
+ if (src >= 4) {
+ // Pick a temporary register.
+ RegisterID temp = getUnusedRegister(address);
+
+ // Swap to the temporary register to perform the store.
+ swap(src, temp);
+ m_assembler.movb_rm(temp, address.offset, address.base, address.index, address.scale);
+ swap(src, temp);
+ return;
+ }
+#endif
+ m_assembler.movb_rm(src, address.offset, address.base, address.index, address.scale);
+ }
+
+ void store8(RegisterID src, Address address)
+ {
+#if CPU(X86)
+ // On 32-bit x86 we can only store from the first 4 registers;
+ // esp..edi are mapped to the 'h' registers!
+ if (src >= 4) {
+ // Pick a temporary register.
+ RegisterID temp = getUnusedRegister(address);
+
+ // Swap to the temporary register to perform the store.
+ swap(src, temp);
+ m_assembler.movb_rm(temp, address.offset, address.base);
+ swap(src, temp);
+ return;
+ }
+#endif
+ m_assembler.movb_rm(src, address.offset, address.base);
+ }
+
+ void store16(RegisterID src, BaseIndex address)
+ {
+#if CPU(X86)
+ // On 32-bit x86 we can only store from the first 4 registers;
+ // esp..edi are mapped to the 'h' registers!
+ if (src >= 4) {
+ // Pick a temporary register.
+ RegisterID temp = getUnusedRegister(address);
+
+ // Swap to the temporary register to perform the store.
+ swap(src, temp);
+ m_assembler.movw_rm(temp, address.offset, address.base, address.index, address.scale);
+ swap(src, temp);
+ return;
+ }
+#endif
+ m_assembler.movw_rm(src, address.offset, address.base, address.index, address.scale);
+ }
// Floating-point operations:
//
// Presently only supports SSE, not x87 floating point.
+ void moveDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ if (src != dest)
+ m_assembler.movsd_rr(src, dest);
+ }
+
+ void loadDouble(TrustedImmPtr address, FPRegisterID dest)
+ {
+#if CPU(X86)
+ ASSERT(isSSE2Present());
+ m_assembler.movsd_mr(address.m_value, dest);
+#else
+ move(address, scratchRegister);
+ loadDouble(scratchRegister, dest);
+#endif
+ }
+
void loadDouble(ImplicitAddress address, FPRegisterID dest)
{
ASSERT(isSSE2Present());
m_assembler.movsd_mr(address.offset, address.base, dest);
}
+
+ void loadDouble(BaseIndex address, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movsd_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
+ void loadFloat(BaseIndex address, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movss_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
void storeDouble(FPRegisterID src, ImplicitAddress address)
{
ASSERT(isSSE2Present());
m_assembler.movsd_rm(src, address.offset, address.base);
}
+
+ void storeDouble(FPRegisterID src, BaseIndex address)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movsd_rm(src, address.offset, address.base, address.index, address.scale);
+ }
+
+ void storeFloat(FPRegisterID src, BaseIndex address)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movss_rm(src, address.offset, address.base, address.index, address.scale);
+ }
+
+ void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvtsd2ss_rr(src, dst);
+ }
+
+ void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvtss2sd_rr(src, dst);
+ }
void addDouble(FPRegisterID src, FPRegisterID dest)
{
m_assembler.addsd_rr(src, dest);
}
+ void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ if (op1 == dest)
+ addDouble(op2, dest);
+ else {
+ moveDouble(op2, dest);
+ addDouble(op1, dest);
+ }
+ }
+
void addDouble(Address src, FPRegisterID dest)
{
ASSERT(isSSE2Present());
m_assembler.divsd_rr(src, dest);
}
+ void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ // B := A / B is invalid.
+ ASSERT(op1 == dest || op2 != dest);
+
+ moveDouble(op1, dest);
+ divDouble(op2, dest);
+ }
+
void divDouble(Address src, FPRegisterID dest)
{
ASSERT(isSSE2Present());
m_assembler.subsd_rr(src, dest);
}
+ void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ // B := A - B is invalid.
+ ASSERT(op1 == dest || op2 != dest);
+
+ moveDouble(op1, dest);
+ subDouble(op2, dest);
+ }
+
void subDouble(Address src, FPRegisterID dest)
{
ASSERT(isSSE2Present());
m_assembler.mulsd_rr(src, dest);
}
+ void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ if (op1 == dest)
+ mulDouble(op2, dest);
+ else {
+ moveDouble(op2, dest);
+ mulDouble(op1, dest);
+ }
+ }
+
void mulDouble(Address src, FPRegisterID dest)
{
ASSERT(isSSE2Present());
void convertInt32ToDouble(Address src, FPRegisterID dest)
{
+ ASSERT(isSSE2Present());
m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
}
Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
{
ASSERT(isSSE2Present());
- m_assembler.ucomisd_rr(right, left);
- return Jump(m_assembler.jCC(x86Condition(cond)));
- }
- Jump branchDouble(DoubleCondition cond, FPRegisterID left, Address right)
- {
- m_assembler.ucomisd_mr(right.offset, right.base, left);
- return Jump(m_assembler.jCC(x86Condition(cond)));
+ if (cond & DoubleConditionBitInvert)
+ m_assembler.ucomisd_rr(left, right);
+ else
+ m_assembler.ucomisd_rr(right, left);
+
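+ // ucomisd sets ZF, PF and CF; PF is set when the comparison is unordered (either operand is NaN),
+ // so the two DoubleConditionBitSpecial cases below need an explicit parity check around the equality test.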
+ if (cond == DoubleEqual) {
+ if (left == right)
+ return Jump(m_assembler.jnp());
+ Jump isUnordered(m_assembler.jp());
+ Jump result = Jump(m_assembler.je());
+ isUnordered.link(this);
+ return result;
+ } else if (cond == DoubleNotEqualOrUnordered) {
+ if (left == right)
+ return Jump(m_assembler.jp());
+ Jump isUnordered(m_assembler.jp());
+ Jump isEqual(m_assembler.je());
+ isUnordered.link(this);
+ Jump result = jump();
+ isEqual.link(this);
+ return result;
+ }
+
+ ASSERT(!(cond & DoubleConditionBitSpecial));
+ return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
}
// Truncates 'src' to an integer, and places the result in 'dest'.
// If the result is not representable as a 32 bit value, branch.
// May also branch for some values that are representable in 32 bits
// (specifically, in this case, INT_MIN).
- Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+ enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
+ Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvttsd2si_rr(src, dest);
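+ // cvttsd2si yields 0x80000000 (the "integer indefinite" value) for NaN and out-of-range inputs, so that sentinel signals failure.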
+ return branch32(branchType ? NotEqual : Equal, dest, TrustedImm32(0x80000000));
+ }
+
+ void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvttsd2si_rr(src, dest);
+ }
+
+#if CPU(X86_64)
+ void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvttsd2siq_rr(src, dest);
+ }
+#endif
+
+ // Converts 'src' to an integer, and places the result in 'dest'.
+ // If the result is not representable as a 32 bit value, branch.
+ // May also branch for some values that are representable in 32 bits
+ // (specifically, in this case, 0).
+ void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp, bool negZeroCheck = true)
{
ASSERT(isSSE2Present());
m_assembler.cvttsd2si_rr(src, dest);
- return branch32(Equal, dest, Imm32(0x80000000));
+
+ // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
+ if (negZeroCheck)
+ failureCases.append(branchTest32(Zero, dest));
+
+ // Convert the integer result back to a double & compare to the original value - if not equal or unordered (NaN) then jump.
+ convertInt32ToDouble(dest, fpTemp);
+ m_assembler.ucomisd_rr(fpTemp, src);
+ failureCases.append(m_assembler.jp());
+ failureCases.append(m_assembler.jne());
}
- void zeroDouble(FPRegisterID srcDest)
+ Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
{
ASSERT(isSSE2Present());
- m_assembler.xorpd_rr(srcDest, srcDest);
+ m_assembler.xorpd_rr(scratch, scratch);
+ return branchDouble(DoubleNotEqual, reg, scratch);
}
+ Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.xorpd_rr(scratch, scratch);
+ return branchDouble(DoubleEqualOrUnordered, reg, scratch);
+ }
+
+ void lshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.psllq_i8r(imm.m_value, reg);
+ }
+
+ void rshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.psrlq_i8r(imm.m_value, reg);
+ }
+
+ void orPacked(XMMRegisterID src, XMMRegisterID dst)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.por_rr(src, dst);
+ }
+
+ void moveInt32ToPacked(RegisterID src, XMMRegisterID dst)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movd_rr(src, dst);
+ }
+
+ void movePackedToInt32(XMMRegisterID src, RegisterID dst)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movd_rr(src, dst);
+ }
// Stack manipulation operations:
//
m_assembler.push_m(address.offset, address.base);
}
- void push(Imm32 imm)
+ void push(TrustedImm32 imm)
{
m_assembler.push_i32(imm.m_value);
}
//
// Move values in registers.
- void move(Imm32 imm, RegisterID dest)
+ void move(TrustedImm32 imm, RegisterID dest)
{
- // Note: on 64-bit the Imm32 value is zero extended into the register, it
+ // Note: on 64-bit the TrustedImm32 value is zero extended into the register; it
// may be useful to have a separate version that sign extends the value?
if (!imm.m_value)
m_assembler.xorl_rr(dest, dest);
m_assembler.movl_i32r(imm.m_value, dest);
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void move(RegisterID src, RegisterID dest)
{
// Note: on 64-bit this is a full register move; perhaps it would be
m_assembler.movq_rr(src, dest);
}
- void move(ImmPtr imm, RegisterID dest)
+ void move(TrustedImmPtr imm, RegisterID dest)
{
- if (CAN_SIGN_EXTEND_U32_64(imm.asIntptr()))
- m_assembler.movl_i32r(static_cast<int32_t>(imm.asIntptr()), dest);
- else
- m_assembler.movq_i64r(imm.asIntptr(), dest);
+ m_assembler.movq_i64r(imm.asIntptr(), dest);
+ }
+
+ void move(TrustedImm64 imm, RegisterID dest)
+ {
+ m_assembler.movq_i64r(imm.m_value, dest);
}
void swap(RegisterID reg1, RegisterID reg2)
{
- m_assembler.xchgq_rr(reg1, reg2);
+ if (reg1 != reg2)
+ m_assembler.xchgq_rr(reg1, reg2);
}
void signExtend32ToPtr(RegisterID src, RegisterID dest)
m_assembler.movl_rr(src, dest);
}
- void move(ImmPtr imm, RegisterID dest)
+ void move(TrustedImmPtr imm, RegisterID dest)
{
m_assembler.movl_i32r(imm.asIntptr(), dest);
}
// used (representing the names 'below' and 'above').
//
// Operands to the comparison are provided in the expected order, e.g.
- // jle32(reg1, Imm32(5)) will branch if the value held in reg1, when
+ // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
// treated as a signed 32bit value, is less than or equal to 5.
//
// jz and jnz test whether the first operand is equal to zero, and take
// an optional second operand of a mask under which to perform the test.
public:
- Jump branch32(Condition cond, RegisterID left, RegisterID right)
+ Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
+ {
+ m_assembler.cmpb_im(right.m_value, left.offset, left.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
{
m_assembler.cmpl_rr(right, left);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Jump branch32(Condition cond, RegisterID left, Imm32 right)
+ Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
{
if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
m_assembler.testl_rr(left, left);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Jump branch32(Condition cond, RegisterID left, Address right)
+ Jump branch32(RelationalCondition cond, RegisterID left, Address right)
{
m_assembler.cmpl_mr(right.offset, right.base, left);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Jump branch32(Condition cond, Address left, RegisterID right)
+ Jump branch32(RelationalCondition cond, Address left, RegisterID right)
{
m_assembler.cmpl_rm(right, left.offset, left.base);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Jump branch32(Condition cond, Address left, Imm32 right)
+ Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
{
m_assembler.cmpl_im(right.m_value, left.offset, left.base);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Jump branch32(Condition cond, BaseIndex left, Imm32 right)
+ Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
{
m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Jump branch16(Condition cond, BaseIndex left, RegisterID right)
+ Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ return branch32(cond, left, right);
+ }
+
+ Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
{
- m_assembler.cmpw_rm(right, left.offset, left.base, left.index, left.scale);
+ m_assembler.testl_rr(reg, mask);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Jump branch16(Condition cond, BaseIndex left, Imm32 right)
+ void test32(ResultCondition, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
{
- ASSERT(!(right.m_value & 0xFFFF0000));
+ if (mask.m_value == -1)
+ m_assembler.testl_rr(reg, reg);
+ else if (!(mask.m_value & ~0xff) && reg < X86Registers::esp) { // Using esp and greater as a byte register yields the upper half of the 16 bit registers ax, cx, dx and bx, e.g. esp, register 4, is actually ah.
+ if (mask.m_value == 0xff)
+ m_assembler.testb_rr(reg, reg);
+ else
+ m_assembler.testb_i8r(mask.m_value, reg);
+ } else
+ m_assembler.testl_i32r(mask.m_value, reg);
+ }
- m_assembler.cmpw_im(right.m_value, left.offset, left.base, left.index, left.scale);
+ Jump branch(ResultCondition cond)
+ {
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
+ Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
{
- ASSERT((cond == Zero) || (cond == NonZero));
- m_assembler.testl_rr(reg, mask);
+ test32(cond, reg, mask);
+ return branch(cond);
+ }
+
+ Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ generateTest32(address, mask);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
+ Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
{
- ASSERT((cond == Zero) || (cond == NonZero));
- // if we are only interested in the low seven bits, this can be tested with a testb
if (mask.m_value == -1)
- m_assembler.testl_rr(reg, reg);
- else if ((mask.m_value & ~0x7f) == 0)
- m_assembler.testb_i8r(mask.m_value, reg);
+ m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
else
- m_assembler.testl_i32r(mask.m_value, reg);
+ m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
-
- Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
+
+ Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
{
- ASSERT((cond == Zero) || (cond == NonZero));
+ // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
+ ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
if (mask.m_value == -1)
- m_assembler.cmpl_im(0, address.offset, address.base);
+ m_assembler.cmpb_im(0, address.offset, address.base);
else
- m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
+ m_assembler.testb_im(mask.m_value, address.offset, address.base);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
-
- Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
+
+ Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
{
- ASSERT((cond == Zero) || (cond == NonZero));
+ // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
+ ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
if (mask.m_value == -1)
- m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
+ m_assembler.cmpb_im(0, address.offset, address.base, address.index, address.scale);
else
- m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
+ m_assembler.testb_im(mask.m_value, address.offset, address.base, address.index, address.scale);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ ASSERT(!(right.m_value & 0xFFFFFF00));
+
+ m_assembler.cmpb_im(right.m_value, left.offset, left.base, left.index, left.scale);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
// * jo operations branch if the (signed) arithmetic
// operation caused an overflow to occur.
- Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
+ Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
{
- ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
add32(src, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
{
- ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
add32(imm, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Jump branchAdd32(Condition cond, Imm32 src, Address dest)
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 src, Address dest)
{
- ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
add32(src, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Jump branchAdd32(Condition cond, RegisterID src, Address dest)
+ Jump branchAdd32(ResultCondition cond, RegisterID src, Address dest)
{
- ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
add32(src, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Jump branchAdd32(Condition cond, Address src, RegisterID dest)
+ Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
{
- ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
add32(src, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
+ Jump branchAdd32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ if (src1 == dest)
+ return branchAdd32(cond, src2, dest);
+ move(src2, dest);
+ return branchAdd32(cond, src1, dest);
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ move(src, dest);
+ return branchAdd32(cond, imm, dest);
+ }
+
+ Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
{
- ASSERT(cond == Overflow);
mul32(src, dest);
+ if (cond != Overflow)
+ m_assembler.testl_rr(dest, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Jump branchMul32(Condition cond, Address src, RegisterID dest)
+ Jump branchMul32(ResultCondition cond, Address src, RegisterID dest)
{
- ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
mul32(src, dest);
+ if (cond != Overflow)
+ m_assembler.testl_rr(dest, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
+ Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
{
- ASSERT(cond == Overflow);
mul32(imm, src, dest);
+ if (cond != Overflow)
+ m_assembler.testl_rr(dest, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
+ Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ if (src1 == dest)
+ return branchMul32(cond, src2, dest);
+ move(src2, dest);
+ return branchMul32(cond, src1, dest);
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
{
- ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
sub32(src, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
+ Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
{
- ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
sub32(imm, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Jump branchSub32(Condition cond, Imm32 imm, Address dest)
+ Jump branchSub32(ResultCondition cond, TrustedImm32 imm, Address dest)
{
- ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
sub32(imm, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Jump branchSub32(Condition cond, RegisterID src, Address dest)
+ Jump branchSub32(ResultCondition cond, RegisterID src, Address dest)
{
- ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
sub32(src, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Jump branchSub32(Condition cond, Address src, RegisterID dest)
+ Jump branchSub32(ResultCondition cond, Address src, RegisterID dest)
{
- ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
sub32(src, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Jump branchOr32(Condition cond, RegisterID src, RegisterID dest)
+ Jump branchSub32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ // B := A - B is invalid.
+ ASSERT(src1 == dest || src2 != dest);
+
+ move(src1, dest);
+ return branchSub32(cond, src2, dest);
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
+ {
+ move(src1, dest);
+ return branchSub32(cond, src2, dest);
+ }
+
+ Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
+ {
+ neg32(srcDest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
{
- ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
or32(src, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
m_assembler.ret();
}
- void set8(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
- {
- m_assembler.cmpl_rr(right, left);
- m_assembler.setCC_r(x86Condition(cond), dest);
- }
-
- void set8(Condition cond, Address left, RegisterID right, RegisterID dest)
- {
- m_assembler.cmpl_mr(left.offset, left.base, right);
- m_assembler.setCC_r(x86Condition(cond), dest);
- }
-
- void set8(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
+ void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
{
- if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
- m_assembler.testl_rr(left, left);
- else
- m_assembler.cmpl_ir(right.m_value, left);
- m_assembler.setCC_r(x86Condition(cond), dest);
+ m_assembler.cmpb_im(right.m_value, left.offset, left.base);
+ set32(x86Condition(cond), dest);
}
-
- void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
+
+ void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
{
m_assembler.cmpl_rr(right, left);
- m_assembler.setCC_r(x86Condition(cond), dest);
- m_assembler.movzbl_rr(dest, dest);
+ set32(x86Condition(cond), dest);
}
- void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
+ void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
{
if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
m_assembler.testl_rr(left, left);
else
m_assembler.cmpl_ir(right.m_value, left);
- m_assembler.setCC_r(x86Condition(cond), dest);
- m_assembler.movzbl_rr(dest, dest);
+ set32(x86Condition(cond), dest);
}
// FIXME:
- // The mask should be optional... paerhaps the argument order should be
+ // The mask should be optional... perhaps the argument order should be
// dest-src, operations always have a dest? ... possibly not true, considering
// asm ops like test, or pseudo ops like pop().
- void setTest8(Condition cond, Address address, Imm32 mask, RegisterID dest)
+ void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
{
if (mask.m_value == -1)
- m_assembler.cmpl_im(0, address.offset, address.base);
+ m_assembler.cmpb_im(0, address.offset, address.base);
else
- m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
- m_assembler.setCC_r(x86Condition(cond), dest);
+ m_assembler.testb_im(mask.m_value, address.offset, address.base);
+ set32(x86Condition(cond), dest);
}
- void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
+ void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
{
- if (mask.m_value == -1)
- m_assembler.cmpl_im(0, address.offset, address.base);
- else
- m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
- m_assembler.setCC_r(x86Condition(cond), dest);
- m_assembler.movzbl_rr(dest, dest);
+ generateTest32(address, mask);
+ set32(x86Condition(cond), dest);
+ }
+
+ // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
+ static RelationalCondition invert(RelationalCondition cond)
+ {
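+ // x86 condition codes are arranged in complementary pairs that differ only in the low bit, so flipping bit 0 inverts the condition.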
+ return static_cast<RelationalCondition>(cond ^ 1);
+ }
+
+ void nop()
+ {
+ m_assembler.nop();
+ }
+
+ void memoryFence()
+ {
+ m_assembler.mfence();
}
+ static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
+ {
+ X86Assembler::replaceWithJump(instructionStart.executableAddress(), destination.executableAddress());
+ }
+
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+ return X86Assembler::maxJumpReplacementSize();
+ }
+
+#if USE(MASM_PROBE)
+ struct CPUState {
+ #define DECLARE_REGISTER(_type, _regName) \
+ _type _regName;
+ FOR_EACH_CPU_REGISTER(DECLARE_REGISTER)
+ #undef DECLARE_REGISTER
+ };
+
+ struct ProbeContext;
+ typedef void (*ProbeFunction)(struct ProbeContext*);
+
+ struct ProbeContext {
+ ProbeFunction probeFunction;
+ void* arg1;
+ void* arg2;
+ CPUState cpu;
+
+ void dump(const char* indentation = 0);
+ private:
+ void dumpCPURegisters(const char* indentation);
+ };
+#endif // USE(MASM_PROBE)
+
protected:
- X86Assembler::Condition x86Condition(Condition cond)
+ X86Assembler::Condition x86Condition(RelationalCondition cond)
{
return static_cast<X86Assembler::Condition>(cond);
}
- X86Assembler::Condition x86Condition(DoubleCondition cond)
+ X86Assembler::Condition x86Condition(ResultCondition cond)
{
return static_cast<X86Assembler::Condition>(cond);
}
+ void set32(X86Assembler::Condition cond, RegisterID dest)
+ {
+#if CPU(X86)
+ // On 32-bit x86 we can only set the first 4 registers;
+ // esp..edi are mapped to the 'h' registers!
+ if (dest >= 4) {
+ m_assembler.xchgl_rr(dest, X86Registers::eax);
+ m_assembler.setCC_r(cond, X86Registers::eax);
+ m_assembler.movzbl_rr(X86Registers::eax, X86Registers::eax);
+ m_assembler.xchgl_rr(dest, X86Registers::eax);
+ return;
+ }
+#endif
+ m_assembler.setCC_r(cond, dest);
+ m_assembler.movzbl_rr(dest, dest);
+ }
+
private:
// Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
// x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
friend class MacroAssemblerX86;
-#if PLATFORM(X86)
-#if PLATFORM(MAC)
+ ALWAYS_INLINE void generateTest32(Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
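+ // If the mask fits entirely within a single byte of the operand, test just that byte with a shorter testb; fall back to testl otherwise.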
+ if (mask.m_value == -1)
+ m_assembler.cmpl_im(0, address.offset, address.base);
+ else if (!(mask.m_value & ~0xff))
+ m_assembler.testb_im(mask.m_value, address.offset, address.base);
+ else if (!(mask.m_value & ~0xff00))
+ m_assembler.testb_im(mask.m_value >> 8, address.offset + 1, address.base);
+ else if (!(mask.m_value & ~0xff0000))
+ m_assembler.testb_im(mask.m_value >> 16, address.offset + 2, address.base);
+ else if (!(mask.m_value & ~0xff000000))
+ m_assembler.testb_im(mask.m_value >> 24, address.offset + 3, address.base);
+ else
+ m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
+ }
+
+#if CPU(X86)
+#if OS(MAC_OS_X)
// All X86 Macs are guaranteed to support at least SSE2,
static bool isSSE2Present()
return true;
}
-#else // PLATFORM(MAC)
+#else // OS(MAC_OS_X)
enum SSE2CheckState {
NotCheckedSSE2,
static SSE2CheckState s_sse2CheckState;
-#endif // PLATFORM(MAC)
-#elif !defined(NDEBUG) // PLATFORM(X86)
+#endif // OS(MAC_OS_X)
+#elif !defined(NDEBUG) // CPU(X86)
// On x86-64 we should never be checking for SSE2 in a non-debug build,
// but in debug builds we add this method to keep the asserts above happy.