class SpecializedThunkJIT : public JSInterfaceJIT {
public:
static const int ThisArgument = -1;
- SpecializedThunkJIT(int expectedArgCount, JSGlobalData* globalData, ExecutablePool* pool)
- : m_expectedArgCount(expectedArgCount)
- , m_globalData(globalData)
- , m_pool(pool)
+ SpecializedThunkJIT(int expectedArgCount)
{
// Check that we have the expected number of arguments
- m_failures.append(branch32(NotEqual, Address(callFrameRegister, RegisterFile::ArgumentCount * (int)sizeof(Register)), TrustedImm32(expectedArgCount + 1)));
+ m_failures.append(branch32(NotEqual, payloadFor(JSStack::ArgumentCount), TrustedImm32(expectedArgCount + 1)));
}
void loadDoubleArgument(int argument, FPRegisterID dst, RegisterID scratch)
{
- unsigned src = argumentToVirtualRegister(argument);
+ unsigned src = CallFrame::argumentOffset(argument);
m_failures.append(emitLoadDouble(src, dst, scratch));
}
void loadCellArgument(int argument, RegisterID dst)
{
- unsigned src = argumentToVirtualRegister(argument);
+ unsigned src = CallFrame::argumentOffset(argument);
m_failures.append(emitLoadJSCell(src, dst));
}
- void loadJSStringArgument(int argument, RegisterID dst)
+ void loadJSStringArgument(VM& vm, int argument, RegisterID dst)
{
loadCellArgument(argument, dst);
- m_failures.append(branchPtr(NotEqual, Address(dst, 0), TrustedImmPtr(m_globalData->jsStringVPtr)));
- m_failures.append(branchTest32(NonZero, Address(dst, OBJECT_OFFSETOF(JSString, m_fiberCount))));
+ m_failures.append(branchPtr(NotEqual, Address(dst, JSCell::structureOffset()), TrustedImmPtr(vm.stringStructure.get())));
}
void loadInt32Argument(int argument, RegisterID dst, Jump& failTarget)
{
- unsigned src = argumentToVirtualRegister(argument);
+ unsigned src = CallFrame::argumentOffset(argument);
failTarget = emitLoadInt32(src, dst);
}
void returnJSValue(RegisterID src)
{
if (src != regT0)
move(src, regT0);
- loadPtr(payloadFor(RegisterFile::CallerFrame, callFrameRegister), callFrameRegister);
+ loadPtr(payloadFor(JSStack::CallerFrame, callFrameRegister), callFrameRegister);
ret();
}
void returnDouble(FPRegisterID src)
{
#if USE(JSVALUE64)
- moveDoubleToPtr(src, regT0);
- subPtr(tagTypeNumberRegister, regT0);
+ moveDoubleTo64(src, regT0);
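+ // Descriptive note (not in the original patch): a raw bit pattern of zero is +0.0;
+ // it is returned as the int32 0 JSValue (TagTypeNumber itself) rather than as a boxed
+ // double. Otherwise, subtracting TagTypeNumber adds the double-encode offset (2^48).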
+ Jump zero = branchTest64(Zero, regT0);
+ sub64(tagTypeNumberRegister, regT0);
+ Jump done = jump();
+ zero.link(this);
+ move(tagTypeNumberRegister, regT0);
+ done.link(this);
#else
storeDouble(src, Address(stackPointerRegister, -(int)sizeof(double)));
loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(JSValue, u.asBits.tag) - sizeof(double)), regT1);
loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(JSValue, u.asBits.payload) - sizeof(double)), regT0);
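+ // Descriptive note (not in the original patch): if both the tag and payload words are
+ // zero (the bits of +0.0), rewrite the result as the int32 0 JSValue; otherwise the raw
+ // double bits already sit in regT1:regT0.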
+ Jump lowNonZero = branchTestPtr(NonZero, regT1);
+ Jump highNonZero = branchTestPtr(NonZero, regT0);
+ move(TrustedImm32(0), regT0);
+ move(TrustedImm32(Int32Tag), regT1);
+ lowNonZero.link(this);
+ highNonZero.link(this);
#endif
- loadPtr(payloadFor(RegisterFile::CallerFrame, callFrameRegister), callFrameRegister);
+ loadPtr(payloadFor(JSStack::CallerFrame, callFrameRegister), callFrameRegister);
ret();
}
void returnInt32(RegisterID src)
{
if (src != regT0)
move(src, regT0);
tagReturnAsInt32();
- loadPtr(payloadFor(RegisterFile::CallerFrame, callFrameRegister), callFrameRegister);
+ loadPtr(payloadFor(JSStack::CallerFrame, callFrameRegister), callFrameRegister);
ret();
}
void returnJSCell(RegisterID src)
{
if (src != regT0)
move(src, regT0);
tagReturnAsJSCell();
- loadPtr(payloadFor(RegisterFile::CallerFrame, callFrameRegister), callFrameRegister);
+ loadPtr(payloadFor(JSStack::CallerFrame, callFrameRegister), callFrameRegister);
ret();
}
- MacroAssemblerCodePtr finalize(JSGlobalData& globalData, MacroAssemblerCodePtr fallback)
+ MacroAssemblerCodeRef finalize(VM& vm, MacroAssemblerCodePtr fallback, const char* thunkKind)
{
- LinkBuffer patchBuffer(globalData, this, m_pool.get());
+ LinkBuffer patchBuffer(vm, this, GLOBAL_THUNK_ID);
patchBuffer.link(m_failures, CodeLocationLabel(fallback));
- return patchBuffer.finalizeCode().m_code;
+ for (unsigned i = 0; i < m_calls.size(); i++)
+ patchBuffer.link(m_calls[i].first, m_calls[i].second);
+ return FINALIZE_CODE(patchBuffer, ("Specialized thunk for %s", thunkKind));
+ }
+
+ // Assumes that the target function uses fpRegister0 as the first argument
+ // and return value. Like any sensible architecture would.
+ void callDoubleToDouble(FunctionPtr function)
+ {
+ m_calls.append(std::make_pair(call(), function));
}
- private:
- int argumentToVirtualRegister(unsigned argument)
+ void callDoubleToDoublePreservingReturn(FunctionPtr function)
{
- return -static_cast<int>(RegisterFile::CallFrameHeaderSize + (m_expectedArgCount - argument));
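+ // Descriptive note (not in the original patch): on targets that keep the return
+ // address in a register (everything but x86 here), the C call below would clobber
+ // it, so it is stashed in regT3 across the call and restored before returning.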
+ if (!isX86())
+ preserveReturnAddressAfterCall(regT3);
+ callDoubleToDouble(function);
+ if (!isX86())
+ restoreReturnAddressBeforeReturn(regT3);
}
+ private:
+
void tagReturnAsInt32()
{
#if USE(JSVALUE64)
- orPtr(tagTypeNumberRegister, regT0);
+ or64(tagTypeNumberRegister, regT0);
#else
move(TrustedImm32(JSValue::Int32Tag), regT1);
#endif
}
- int m_expectedArgCount;
- JSGlobalData* m_globalData;
- RefPtr<ExecutablePool> m_pool;
MacroAssembler::JumpList m_failures;
+ Vector<std::pair<Call, FunctionPtr> > m_calls;
};
}
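
For reference, a thunk generator drives this class roughly as sketched below. This is an illustrative sketch, not part of the patch: supportsFloatingPointSqrt(), sqrtDouble(), MacroAssemblerCodeRef::createSelfManagedCodeRef(), and the vm->jitStubs->ctiNativeCall(vm) fallback accessor are assumed from the JSC sources of this era (ThunkGenerators.cpp) and may differ in detail.

MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1); // Math.sqrt expects exactly one argument.
    if (!jit.supportsFloatingPointSqrt())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    // Any failure jump (wrong argument count, non-numeric argument) lands on the
    // generic native-call fallback linked in finalize().
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "sqrt");
}

Generators for functions with no dedicated instruction (floor, exp, log) load the argument the same way but route through callDoubleToDoublePreservingReturn(FunctionPtr(...)) instead of an inline instruction, which is what the m_calls vector and the return-address save/restore added in this patch support.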