#if ENABLE(JIT)
#include "Executable.h"
+#include "JIT.h"
+#include "JITInlines.h"
#include "JSInterfaceJIT.h"
+#include "JSStack.h"
#include "LinkBuffer.h"
namespace JSC {
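+// A rough usage sketch (assuming a generator shaped like those in ThunkGenerators.cpp;
+// the argument count and thunk name below are illustrative only):
+//     SpecializedThunkJIT jit(vm, 1);
+//     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
+//     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
+//     return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "example");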
class SpecializedThunkJIT : public JSInterfaceJIT {
public:
static const int ThisArgument = -1;
- SpecializedThunkJIT(int expectedArgCount, JSGlobalData* globalData, ExecutablePool* pool)
- : m_expectedArgCount(expectedArgCount)
- , m_globalData(globalData)
- , m_pool(pool)
+ SpecializedThunkJIT(VM* vm, int expectedArgCount)
+ : JSInterfaceJIT(vm)
{
+ emitFunctionPrologue();
// Check that we have the expected number of arguments
- m_failures.append(branch32(NotEqual, Address(callFrameRegister, RegisterFile::ArgumentCount * (int)sizeof(Register)), Imm32(expectedArgCount + 1)));
+ m_failures.append(branch32(NotEqual, payloadFor(JSStack::ArgumentCount), TrustedImm32(expectedArgCount + 1)));
+ }
+
+ explicit SpecializedThunkJIT(VM* vm)
+ : JSInterfaceJIT(vm)
+ {
+ emitFunctionPrologue();
}
void loadDoubleArgument(int argument, FPRegisterID dst, RegisterID scratch)
{
- unsigned src = argumentToVirtualRegister(argument);
+ unsigned src = CallFrame::argumentOffset(argument);
m_failures.append(emitLoadDouble(src, dst, scratch));
}
void loadCellArgument(int argument, RegisterID dst)
{
- unsigned src = argumentToVirtualRegister(argument);
+ unsigned src = CallFrame::argumentOffset(argument);
m_failures.append(emitLoadJSCell(src, dst));
}
- void loadJSStringArgument(int argument, RegisterID dst)
+ void loadJSStringArgument(VM& vm, int argument, RegisterID dst)
{
loadCellArgument(argument, dst);
- m_failures.append(branchPtr(NotEqual, Address(dst, 0), ImmPtr(m_globalData->jsStringVPtr)));
- m_failures.append(branchTest32(NonZero, Address(dst, OBJECT_OFFSETOF(JSString, m_fiberCount))));
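+ // Bail to the slow case unless the cell's structure is the VM's string structure.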
+ m_failures.append(branchStructure(*this, NotEqual,
+ Address(dst, JSCell::structureIDOffset()),
+ vm.stringStructure.get()));
+ }
+
+ void loadArgumentWithSpecificClass(const ClassInfo* classInfo, int argument, RegisterID dst, RegisterID scratch)
+ {
+ loadCellArgument(argument, dst);
+ emitLoadStructure(dst, scratch, dst);
+ appendFailure(branchPtr(NotEqual, Address(scratch, Structure::classInfoOffset()), TrustedImmPtr(classInfo)));
+ // We have to reload the argument since emitLoadStructure clobbered it.
+ loadCellArgument(argument, dst);
}
void loadInt32Argument(int argument, RegisterID dst, Jump& failTarget)
{
- unsigned src = argumentToVirtualRegister(argument);
+ unsigned src = CallFrame::argumentOffset(argument);
failTarget = emitLoadInt32(src, dst);
}
void appendFailure(const Jump& failure)
{
m_failures.append(failure);
}
-
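+ // On 64-bit the boxed JSValue is returned in regT0; on 32-bit the caller supplies the payload/tag pair in regT0/regT1.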
+#if USE(JSVALUE64)
void returnJSValue(RegisterID src)
{
if (src != regT0)
move(src, regT0);
- loadPtr(Address(callFrameRegister, RegisterFile::CallerFrame * (int)sizeof(Register)), callFrameRegister);
+ emitFunctionEpilogue();
ret();
}
+#else
+ void returnJSValue(RegisterID payload, RegisterID tag)
+ {
+ ASSERT_UNUSED(payload, payload == regT0);
+ ASSERT_UNUSED(tag, tag == regT1);
+ emitFunctionEpilogue();
+ ret();
+ }
+#endif
void returnDouble(FPRegisterID src)
{
#if USE(JSVALUE64)
- moveDoubleToPtr(src, regT0);
- subPtr(tagTypeNumberRegister, regT0);
-#elif USE(JSVALUE32_64)
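+ // Box the double by subtracting TagTypeNumber (equivalent to adding the double encode offset);
+ // a source with an all-zero bit pattern (+0.0) is returned as the boxed int32 zero instead.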
+ moveDoubleTo64(src, regT0);
+ Jump zero = branchTest64(Zero, regT0);
+ sub64(tagTypeNumberRegister, regT0);
+ Jump done = jump();
+ zero.link(this);
+ move(tagTypeNumberRegister, regT0);
+ done.link(this);
+#else
+#if !CPU(X86)
+ // The src register is not clobbered by moveDoubleToInts with ARM, MIPS and SH4 macro assemblers, so let's use it.
+ moveDoubleToInts(src, regT0, regT1);
+#else
storeDouble(src, Address(stackPointerRegister, -(int)sizeof(double)));
loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(JSValue, u.asBits.tag) - sizeof(double)), regT1);
loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(JSValue, u.asBits.payload) - sizeof(double)), regT0);
-#else
- UNUSED_PARAM(src);
- ASSERT_NOT_REACHED();
- m_failures.append(jump());
#endif
- loadPtr(Address(callFrameRegister, RegisterFile::CallerFrame * (int)sizeof(Register)), callFrameRegister);
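+ // If both halves are zero (i.e. the double +0.0), return the int32 0 JSValue instead.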
+ Jump lowNonZero = branchTestPtr(NonZero, regT1);
+ Jump highNonZero = branchTestPtr(NonZero, regT0);
+ move(TrustedImm32(0), regT0);
+ move(TrustedImm32(Int32Tag), regT1);
+ lowNonZero.link(this);
+ highNonZero.link(this);
+#endif
+ emitFunctionEpilogue();
ret();
}
void returnInt32(RegisterID src)
{
if (src != regT0)
move(src, regT0);
tagReturnAsInt32();
- loadPtr(Address(callFrameRegister, RegisterFile::CallerFrame * (int)sizeof(Register)), callFrameRegister);
+ emitFunctionEpilogue();
ret();
}
void returnJSCell(RegisterID src)
{
if (src != regT0)
move(src, regT0);
tagReturnAsJSCell();
- loadPtr(Address(callFrameRegister, RegisterFile::CallerFrame * (int)sizeof(Register)), callFrameRegister);
+ emitFunctionEpilogue();
ret();
}
- PassRefPtr<NativeExecutable> finalize()
+ MacroAssemblerCodeRef finalize(MacroAssemblerCodePtr fallback, const char* thunkKind)
{
- LinkBuffer patchBuffer(this, m_pool.get());
- patchBuffer.link(m_failures, CodeLocationLabel(m_globalData->jitStubs->ctiNativeCallThunk()->generatedJITCode().addressForCall()));
- return adoptRef(new NativeExecutable(patchBuffer.finalizeCode()));
+ LinkBuffer patchBuffer(*m_vm, *this, GLOBAL_THUNK_ID);
+ patchBuffer.link(m_failures, CodeLocationLabel(fallback));
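+ // Link any direct calls recorded by callDoubleToDouble() to their target functions.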
+ for (unsigned i = 0; i < m_calls.size(); i++)
+ patchBuffer.link(m_calls[i].first, m_calls[i].second);
+ return FINALIZE_CODE(patchBuffer, ("Specialized thunk for %s", thunkKind));
+ }
+
+ // Assumes that the target function uses fpRegister0 as the first argument
+ // and return value. Like any sensible architecture would.
+ void callDoubleToDouble(FunctionPtr function)
+ {
+ m_calls.append(std::make_pair(call(), function));
}
- private:
- int argumentToVirtualRegister(unsigned argument)
+ void callDoubleToDoublePreservingReturn(FunctionPtr function)
{
- return -static_cast<int>(RegisterFile::CallFrameHeaderSize + (m_expectedArgCount - argument));
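+ // On non-x86 targets the return address lives in a register that the call would clobber, so stash it in regT3 around the call.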
+ if (!isX86())
+ preserveReturnAddressAfterCall(regT3);
+ callDoubleToDouble(function);
+ if (!isX86())
+ restoreReturnAddressBeforeReturn(regT3);
}
+ private:
+
void tagReturnAsInt32()
{
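+ // On 64-bit an int32 is boxed by or'ing in TagTypeNumber; on 32-bit the payload is already in regT0, so only the tag needs setting.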
#if USE(JSVALUE64)
- orPtr(tagTypeNumberRegister, regT0);
-#elif USE(JSVALUE32_64)
- move(Imm32(JSValue::Int32Tag), regT1);
+ or64(tagTypeNumberRegister, regT0);
#else
- signExtend32ToPtr(regT0, regT0);
- // If we can't tag the result, give up and jump to the slow case
- m_failures.append(branchAddPtr(Overflow, regT0, regT0));
- addPtr(Imm32(JSImmediate::TagTypeNumber), regT0);
+ move(TrustedImm32(JSValue::Int32Tag), regT1);
#endif
}
void tagReturnAsJSCell()
{
#if USE(JSVALUE32_64)
- move(Imm32(JSValue::CellTag), regT1);
+ move(TrustedImm32(JSValue::CellTag), regT1);
#endif
}
- int m_expectedArgCount;
- JSGlobalData* m_globalData;
- RefPtr<ExecutablePool> m_pool;
MacroAssembler::JumpList m_failures;
+ Vector<std::pair<Call, FunctionPtr>> m_calls;
};
}
#endif // ENABLE(JIT)