/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
#include "DFGFPRInfo.h"
#include "DFGGPRInfo.h"
#include "DFGNode.h"
-#include "JSGlobalData.h"
+#include "VM.h"
#include "MacroAssembler.h"
namespace JSC { namespace DFG {
-typedef void (*V_DFGDebugOperation_EP)(ExecState*, void*);
+typedef void (*V_DFGDebugOperation_EPP)(ExecState*, void*, void*);
class AssemblyHelpers : public MacroAssembler {
public:
- AssemblyHelpers(JSGlobalData* globalData, CodeBlock* codeBlock)
- : m_globalData(globalData)
+ AssemblyHelpers(VM* vm, CodeBlock* codeBlock)
+ : m_vm(vm)
, m_codeBlock(codeBlock)
- , m_baselineCodeBlock(codeBlock->baselineVersion())
+ , m_baselineCodeBlock(codeBlock ? codeBlock->baselineVersion() : 0)
{
- ASSERT(m_codeBlock);
- ASSERT(m_baselineCodeBlock);
- ASSERT(!m_baselineCodeBlock->alternative());
- ASSERT(m_baselineCodeBlock->getJITType() == JITCode::BaselineJIT);
+ if (m_codeBlock) {
+ ASSERT(m_baselineCodeBlock);
+ ASSERT(!m_baselineCodeBlock->alternative());
+ ASSERT(m_baselineCodeBlock->getJITType() == JITCode::BaselineJIT);
+ }
}
CodeBlock* codeBlock() { return m_codeBlock; }
- JSGlobalData* globalData() { return m_globalData; }
+ VM* vm() { return m_vm; }
AssemblerType_T& assembler() { return m_assembler; }
#if CPU(X86_64) || CPU(X86)
ALWAYS_INLINE void preserveReturnAddressAfterCall(GPRReg reg)
{
pop(reg);
}
#endif // CPU(X86_64) || CPU(X86)
-#if CPU(ARM)
+#if CPU(ARM) || CPU(ARM64)
ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg)
{
move(linkRegister, reg);
}
#endif
- void emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, GPRReg to)
+#if CPU(MIPS)
+ ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg)
{
- loadPtr(Address(GPRInfo::callFrameRegister, entry * sizeof(Register)), to);
+ move(returnAddressRegister, reg);
}
- void emitPutToCallFrameHeader(GPRReg from, RegisterFile::CallFrameHeaderEntry entry)
+
+ ALWAYS_INLINE void restoreReturnAddressBeforeReturn(RegisterID reg)
{
- storePtr(from, Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
+ move(reg, returnAddressRegister);
}
- void emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry)
+ ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address)
{
- storePtr(TrustedImmPtr(value), Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
+ loadPtr(address, returnAddressRegister);
}
+#endif
- Jump branchIfNotCell(GPRReg reg)
+ void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, GPRReg to)
+ {
+ loadPtr(Address(GPRInfo::callFrameRegister, entry * sizeof(Register)), to);
+ }
+ void emitPutToCallFrameHeader(GPRReg from, JSStack::CallFrameHeaderEntry entry)
{
#if USE(JSVALUE64)
- return branchTestPtr(MacroAssembler::NonZero, reg, GPRInfo::tagMaskRegister);
+ store64(from, Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
#else
- return branch32(MacroAssembler::NotEqual, reg, TrustedImm32(JSValue::CellTag));
+ store32(from, Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
#endif
}
-
- static Address addressForGlobalVar(GPRReg global, int32_t varNumber)
- {
- return Address(global, varNumber * sizeof(Register));
- }
- static Address tagForGlobalVar(GPRReg global, int32_t varNumber)
+ void emitPutImmediateToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry entry)
{
- return Address(global, varNumber * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ storePtr(TrustedImmPtr(value), Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
}
- static Address payloadForGlobalVar(GPRReg global, int32_t varNumber)
+ Jump branchIfNotCell(GPRReg reg)
{
- return Address(global, varNumber * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+#if USE(JSVALUE64)
+ return branchTest64(MacroAssembler::NonZero, reg, GPRInfo::tagMaskRegister);
+#else
+ return branch32(MacroAssembler::NotEqual, reg, TrustedImm32(JSValue::CellTag));
+#endif
}
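// Illustrative note (editor's sketch, not part of this patch): on JSVALUE64 this
// assumes tagMaskRegister holds TagMask (TagTypeNumber | TagBitTypeOther, i.e.
// 0xFFFF000000000002), so any boxed immediate tests NonZero. For example, the
// boxed int32 5 is 0xFFFF000000000005 (NonZero against the mask), while a cell
// pointer such as 0x00007F0012345678 has no tag bits set and falls through.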
-
+
static Address addressFor(VirtualRegister virtualRegister)
{
return Address(GPRInfo::callFrameRegister, virtualRegister * sizeof(Register));
}
+ static Address addressFor(int operand)
+ {
+ return addressFor(static_cast<VirtualRegister>(operand));
+ }
static Address tagFor(VirtualRegister virtualRegister)
{
return Address(GPRInfo::callFrameRegister, virtualRegister * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
}
+ static Address tagFor(int operand)
+ {
+ return tagFor(static_cast<VirtualRegister>(operand));
+ }
static Address payloadFor(VirtualRegister virtualRegister)
{
return Address(GPRInfo::callFrameRegister, virtualRegister * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
}
+ static Address payloadFor(int operand)
+ {
+ return payloadFor(static_cast<VirtualRegister>(operand));
+ }
Jump branchIfNotObject(GPRReg structureReg)
{
return branch8(Below, Address(structureReg, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType));
}
// Add a debug call. This call has no effect on JIT code execution state.
- void debugCall(V_DFGDebugOperation_EP function, void* argument)
+ void debugCall(V_DFGDebugOperation_EPP function, void* argument)
{
size_t scratchSize = sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters);
- ScratchBuffer* scratchBuffer = m_globalData->scratchBufferForSize(scratchSize);
+ ScratchBuffer* scratchBuffer = m_vm->scratchBufferForSize(scratchSize);
EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());
- for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i)
- storePtr(GPRInfo::toRegister(i), buffer + i);
+ for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
+#if USE(JSVALUE64)
+ store64(GPRInfo::toRegister(i), buffer + i);
+#else
+ store32(GPRInfo::toRegister(i), buffer + i);
+#endif
+ }
+
for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
storeDouble(FPRInfo::toRegister(i), GPRInfo::regT0);
}
move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0);
storePtr(TrustedImmPtr(scratchSize), GPRInfo::regT0);
-#if CPU(X86_64) || CPU(ARM_THUMB2)
+#if CPU(X86_64) || CPU(ARM) || CPU(ARM64) || CPU(MIPS)
+ move(TrustedImmPtr(buffer), GPRInfo::argumentGPR2);
move(TrustedImmPtr(argument), GPRInfo::argumentGPR1);
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- GPRReg scratch = selectScratchGPR(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1);
+ GPRReg scratch = selectScratchGPR(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, GPRInfo::argumentGPR2);
#elif CPU(X86)
poke(GPRInfo::callFrameRegister, 0);
poke(TrustedImmPtr(argument), 1);
+ poke(TrustedImmPtr(buffer), 2);
GPRReg scratch = GPRInfo::regT0;
#else
#error "DFG JIT not supported on this platform."
#endif
move(TrustedImmPtr(reinterpret_cast<void*>(function)), scratch);
call(scratch);
for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
loadDouble(GPRInfo::regT0, FPRInfo::toRegister(i));
}
- for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i)
- loadPtr(buffer + i, GPRInfo::toRegister(i));
+ for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
+#if USE(JSVALUE64)
+ load64(buffer + i, GPRInfo::toRegister(i));
+#else
+ load32(buffer + i, GPRInfo::toRegister(i));
+#endif
+ }
}
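// Usage sketch (hypothetical names, not part of this patch): the callee must
// match V_DFGDebugOperation_EPP; the third parameter receives the buffer of
// saved register values that debugCall spills above.
//
//     static void logPoint(ExecState* exec, void* context, void* savedRegisters)
//     {
//         dataLog("reached ", static_cast<const char*>(context), "\n");
//     }
//     ...
//     jit.debugCall(logPoint, const_cast<char*>("loop head"));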
// These methods JIT-generate dynamic, debug-only checks, akin to ASSERTs.
#if USE(JSVALUE64)
GPRReg boxDouble(FPRReg fpr, GPRReg gpr)
{
- moveDoubleToPtr(fpr, gpr);
- subPtr(GPRInfo::tagTypeNumberRegister, gpr);
+ moveDoubleTo64(fpr, gpr);
+ sub64(GPRInfo::tagTypeNumberRegister, gpr);
jitAssertIsJSDouble(gpr);
return gpr;
}
FPRReg unboxDouble(GPRReg gpr, FPRReg fpr)
{
jitAssertIsJSDouble(gpr);
- addPtr(GPRInfo::tagTypeNumberRegister, gpr);
- movePtrToDouble(gpr, fpr);
+ add64(GPRInfo::tagTypeNumberRegister, gpr);
+ move64ToDouble(gpr, fpr);
return fpr;
}
#endif
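// Worked example (illustrative): assuming tagTypeNumberRegister holds
// TagTypeNumber (0xFFFF000000000000), the subtraction above is equivalent to
// adding 2^48. The raw bits of 1.0, 0x3FF0000000000000, box to
// 0x3FF1000000000000; unboxDouble adds TagTypeNumber back to recover the
// original IEEE 754 bit pattern.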
-#if USE(JSVALUE32_64) && CPU(X86)
- void boxDouble(FPRReg fpr, GPRReg tagGPR, GPRReg payloadGPR)
- {
- movePackedToInt32(fpr, payloadGPR);
- rshiftPacked(TrustedImm32(32), fpr);
- movePackedToInt32(fpr, tagGPR);
- }
- void unboxDouble(GPRReg tagGPR, GPRReg payloadGPR, FPRReg fpr, FPRReg scratchFPR)
- {
- jitAssertIsJSDouble(tagGPR);
- moveInt32ToPacked(payloadGPR, fpr);
- moveInt32ToPacked(tagGPR, scratchFPR);
- lshiftPacked(TrustedImm32(32), scratchFPR);
- orPacked(scratchFPR, fpr);
- }
-#endif
-
-#if USE(JSVALUE32_64) && CPU(ARM)
+#if USE(JSVALUE32_64)
void boxDouble(FPRReg fpr, GPRReg tagGPR, GPRReg payloadGPR)
{
- m_assembler.vmov(payloadGPR, tagGPR, fpr);
+ moveDoubleToInts(fpr, payloadGPR, tagGPR);
}
void unboxDouble(GPRReg tagGPR, GPRReg payloadGPR, FPRReg fpr, FPRReg scratchFPR)
{
- jitAssertIsJSDouble(tagGPR);
- UNUSED_PARAM(scratchFPR);
- m_assembler.vmov(fpr, payloadGPR, tagGPR);
+ moveIntsToDouble(payloadGPR, tagGPR, fpr, scratchFPR);
}
#endif
Jump emitExceptionCheck(ExceptionCheckKind kind = NormalExceptionCheck)
{
#if USE(JSVALUE64)
- return branchTestPtr(kind == NormalExceptionCheck ? NonZero : Zero, AbsoluteAddress(&globalData()->exception));
+ return branchTest64(kind == NormalExceptionCheck ? NonZero : Zero, AbsoluteAddress(&vm()->exception));
#elif USE(JSVALUE32_64)
- return branch32(kind == NormalExceptionCheck ? NotEqual : Equal, AbsoluteAddress(reinterpret_cast<char*>(&globalData()->exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
+ return branch32(kind == NormalExceptionCheck ? NotEqual : Equal, AbsoluteAddress(reinterpret_cast<char*>(&vm()->exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
#endif
}
JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin)
{
return codeBlock()->globalObjectFor(codeOrigin);
}
- JSObject* globalThisObjectFor(CodeOrigin codeOrigin)
- {
- JSGlobalObject* object = globalObjectFor(codeOrigin);
- return object->methodTable()->toThisObject(object, 0);
- }
-
bool strictModeFor(CodeOrigin codeOrigin)
{
if (!codeOrigin.inlineCallFrame)
return codeBlock()->isStrictMode();
- return codeOrigin.inlineCallFrame->callee->jsExecutable()->isStrictMode();
+ return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->isStrictMode();
}
+ ExecutableBase* executableFor(const CodeOrigin& codeOrigin);
+
CodeBlock* baselineCodeBlockFor(const CodeOrigin& codeOrigin)
{
return baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, baselineCodeBlock());
}
+ CodeBlock* baselineCodeBlockFor(InlineCallFrame* inlineCallFrame)
+ {
+ if (!inlineCallFrame)
+ return baselineCodeBlock();
+ return baselineCodeBlockForInlineCallFrame(inlineCallFrame);
+ }
+
CodeBlock* baselineCodeBlock()
{
return m_baselineCodeBlock;
}
- Vector<BytecodeAndMachineOffset>& decodedCodeMapFor(CodeBlock*);
+ int argumentsRegisterFor(InlineCallFrame* inlineCallFrame)
+ {
+ if (!inlineCallFrame)
+ return codeBlock()->argumentsRegister();
+
+ return baselineCodeBlockForInlineCallFrame(
+ inlineCallFrame)->argumentsRegister() + inlineCallFrame->stackOffset;
+ }
- static const double twoToThe32;
+ int argumentsRegisterFor(const CodeOrigin& codeOrigin)
+ {
+ return argumentsRegisterFor(codeOrigin.inlineCallFrame);
+ }
+
+ SharedSymbolTable* symbolTableFor(const CodeOrigin& codeOrigin)
+ {
+ return baselineCodeBlockFor(codeOrigin)->symbolTable();
+ }
+
+ int offsetOfLocals(const CodeOrigin& codeOrigin)
+ {
+ if (!codeOrigin.inlineCallFrame)
+ return 0;
+ return codeOrigin.inlineCallFrame->stackOffset * sizeof(Register);
+ }
+ int offsetOfArgumentsIncludingThis(const CodeOrigin& codeOrigin)
+ {
+ if (!codeOrigin.inlineCallFrame)
+ return CallFrame::argumentOffsetIncludingThis(0) * sizeof(Register);
+ return (codeOrigin.inlineCallFrame->stackOffset + CallFrame::argumentOffsetIncludingThis(0)) * sizeof(Register);
+ }
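+ // Worked example (illustrative): with 8-byte Registers, an inlined frame at
+ // stackOffset 10 has its locals starting at byte offset 80 from the machine
+ // call frame register, and argument i (including this) at
+ // (10 + CallFrame::argumentOffsetIncludingThis(i)) * 8.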
+
+ Vector<BytecodeAndMachineOffset>& decodedCodeMapFor(CodeBlock*);
+
protected:
- JSGlobalData* m_globalData;
+ VM* m_vm;
CodeBlock* m_codeBlock;
CodeBlock* m_baselineCodeBlock;