namespace JSC {
class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
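+ // x86 condition codes occupy only the low four bits, so the two flag bits below
+ // can safely be OR'd into a DoubleCondition alongside them: 'invert' means the
+ // ucomisd operands must be swapped before comparing, and 'special' means extra
+ // code is needed to handle NaN correctly (see branchDouble). The COMPILE_ASSERT
+ // following the DoubleCondition enum verifies that the bits never collide.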
+ static const int DoubleConditionBitInvert = 0x10;
+ static const int DoubleConditionBitSpecial = 0x20;
+ static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;
+
public:
enum Condition {
};
enum DoubleCondition {
- DoubleEqual = X86Assembler::ConditionE,
+ // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+ DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
DoubleNotEqual = X86Assembler::ConditionNE,
DoubleGreaterThan = X86Assembler::ConditionA,
DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
- DoubleLessThan = X86Assembler::ConditionB,
- DoubleLessThanOrEqual = X86Assembler::ConditionBE,
+ DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
+ DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
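+ // (Less-than is encoded as a swapped-operand greater-than because ucomisd sets
+ // CF for unordered operands, so a direct 'jb' would be spuriously true for NaN.)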
+ // If either operand is NaN, these conditions always evaluate to true.
+ DoubleEqualOrUnordered = X86Assembler::ConditionE,
+ DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
+ DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
+ DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
+ DoubleLessThanOrUnordered = X86Assembler::ConditionB,
+ DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE,
};
+ COMPILE_ASSERT(
+ !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
+ DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);
- static const RegisterID stackPointerRegister = X86::esp;
+ static const RegisterID stackPointerRegister = X86Registers::esp;
// Integer arithmetic operations:
//
{
// On x86 we can only shift by ecx; if asked to shift by another register we'll
// need to rejig the shift amount into ecx first, and restore the registers afterwards.
- if (shift_amount != X86::ecx) {
- swap(shift_amount, X86::ecx);
+ if (shift_amount != X86Registers::ecx) {
+ swap(shift_amount, X86Registers::ecx);
// E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
if (dest == shift_amount)
- m_assembler.shll_CLr(X86::ecx);
+ m_assembler.shll_CLr(X86Registers::ecx);
// E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
- else if (dest == X86::ecx)
+ else if (dest == X86Registers::ecx)
m_assembler.shll_CLr(shift_amount);
// E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
else
m_assembler.shll_CLr(dest);
- swap(shift_amount, X86::ecx);
+ swap(shift_amount, X86Registers::ecx);
} else
m_assembler.shll_CLr(dest);
}
{
// On x86 we can only shift by ecx; if asked to shift by another register we'll
// need to rejig the shift amount into ecx first, and restore the registers afterwards.
- if (shift_amount != X86::ecx) {
- swap(shift_amount, X86::ecx);
+ if (shift_amount != X86Registers::ecx) {
+ swap(shift_amount, X86Registers::ecx);
// E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
if (dest == shift_amount)
- m_assembler.sarl_CLr(X86::ecx);
+ m_assembler.sarl_CLr(X86Registers::ecx);
// E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
- else if (dest == X86::ecx)
+ else if (dest == X86Registers::ecx)
m_assembler.sarl_CLr(shift_amount);
// E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
else
m_assembler.sarl_CLr(dest);
- swap(shift_amount, X86::ecx);
+ swap(shift_amount, X86Registers::ecx);
} else
m_assembler.sarl_CLr(dest);
}
m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
}
+ void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+ {
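+ // x86 supports unaligned memory accesses natively, so a plain load32 suffices.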
+ load32(address, dest);
+ }
+
DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
{
m_assembler.movl_mr_disp32(address.offset, address.base, dest);
void convertInt32ToDouble(Address src, FPRegisterID dest)
{
+ ASSERT(isSSE2Present());
m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
}
Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
{
ASSERT(isSSE2Present());
- m_assembler.ucomisd_rr(right, left);
- return Jump(m_assembler.jCC(x86Condition(cond)));
- }
- Jump branchDouble(DoubleCondition cond, FPRegisterID left, Address right)
- {
- m_assembler.ucomisd_mr(right.offset, right.base, left);
- return Jump(m_assembler.jCC(x86Condition(cond)));
+ if (cond & DoubleConditionBitInvert)
+ m_assembler.ucomisd_rr(left, right);
+ else
+ m_assembler.ucomisd_rr(right, left);
+
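+ // ucomisd sets ZF both on equality and on unordered (NaN) input, and sets PF
+ // only when unordered, so the two 'special' conditions need an extra jp check.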
+ if (cond == DoubleEqual) {
+ Jump isUnordered(m_assembler.jp());
+ Jump result = Jump(m_assembler.je());
+ isUnordered.link(this);
+ return result;
+ } else if (cond == DoubleNotEqualOrUnordered) {
+ Jump isUnordered(m_assembler.jp());
+ Jump isEqual(m_assembler.je());
+ isUnordered.link(this);
+ Jump result = jump();
+ isEqual.link(this);
+ return result;
+ }
+
+ ASSERT(!(cond & DoubleConditionBitSpecial));
+ return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
}
// Truncates 'src' to an integer, and places the result in 'dest'.
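+ // cvttsd2si writes the "integer indefinite" value 0x80000000 when the input
+ // overflows or is NaN, so comparing against it detects a failed truncation.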
return branch32(Equal, dest, Imm32(0x80000000));
}
+ // Converts 'src' to an integer, and places the result in 'dest'.
+ // If the result is not representable as a 32 bit value, branch.
+ // May also branch for some values that are representable in 32 bits
+ // (specifically, in this case, 0).
+ void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
+ {
+ ASSERT(isSSE2Present());
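+ // cvttsd2si truncates toward zero; out-of-range and NaN inputs produce
+ // 0x80000000, which the round-trip comparison below will reject.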
+ m_assembler.cvttsd2si_rr(src, dest);
+
+ // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
+ failureCases.append(branchTest32(Zero, dest));
+
+ // Convert the integer result back to a double and compare to the original value; if not equal or unordered (NaN), then jump.
+ convertInt32ToDouble(dest, fpTemp);
+ m_assembler.ucomisd_rr(fpTemp, src);
+ failureCases.append(m_assembler.jp());
+ failureCases.append(m_assembler.jne());
+ }
+
void zeroDouble(FPRegisterID srcDest)
{
ASSERT(isSSE2Present());
m_assembler.movl_i32r(imm.m_value, dest);
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void move(RegisterID src, RegisterID dest)
{
// Note: on 64-bit this is a full register move; perhaps it would be
void move(ImmPtr imm, RegisterID dest)
{
- if (CAN_SIGN_EXTEND_U32_64(imm.asIntptr()))
- m_assembler.movl_i32r(static_cast<int32_t>(imm.asIntptr()), dest);
- else
- m_assembler.movq_i64r(imm.asIntptr(), dest);
+ m_assembler.movq_i64r(imm.asIntptr(), dest);
}
void swap(RegisterID reg1, RegisterID reg2)
{
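+ // Avoid emitting a redundant xchgq when the two registers are the same.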
- m_assembler.xchgq_rr(reg1, reg2);
+ if (reg1 != reg2)
+ m_assembler.xchgq_rr(reg1, reg2);
}
void signExtend32ToPtr(RegisterID src, RegisterID dest)
return Jump(m_assembler.jCC(x86Condition(cond)));
}
+ Jump branch32WithUnalignedHalfWords(Condition cond, BaseIndex left, Imm32 right)
+ {
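+ // Unaligned accesses are fine on x86, so this defers to the ordinary branch32.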
+ return branch32(cond, left, right);
+ }
+
Jump branch16(Condition cond, BaseIndex left, RegisterID right)
{
m_assembler.cmpw_rm(right, left.offset, left.base, left.index, left.scale);
return static_cast<X86Assembler::Condition>(cond);
}
- X86Assembler::Condition x86Condition(DoubleCondition cond)
- {
- return static_cast<X86Assembler::Condition>(cond);
- }
-
private:
// Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
// x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
friend class MacroAssemblerX86;
-#if PLATFORM(X86)
-#if PLATFORM(MAC)
+#if CPU(X86)
+#if OS(MAC_OS_X)
// All X86 Macs are guaranteed to support at least SSE2.
static bool isSSE2Present()
return true;
}
-#else // PLATFORM(MAC)
+#else // OS(MAC_OS_X)
enum SSE2CheckState {
NotCheckedSSE2,
static SSE2CheckState s_sse2CheckState;
-#endif // PLATFORM(MAC)
-#elif !defined(NDEBUG) // PLATFORM(X86)
+#endif // OS(MAC_OS_X)
+#elif !defined(NDEBUG) // CPU(X86)
// On x86-64 we should never be checking for SSE2 in a non-debug build;
// in debug builds, add this method to keep the asserts above happy.