#ifndef JITCode_h
#define JITCode_h
-#include <wtf/Platform.h>
-
#if ENABLE(JIT)
-
#include "CallFrame.h"
#include "JSValue.h"
#include "MacroAssemblerCodeRef.h"
#include "Profiler.h"
+#endif
namespace JSC {
+#if ENABLE(JIT)
class JSGlobalData;
class RegisterFile;
-
+#endif
+
class JITCode {
+#if ENABLE(JIT)
typedef MacroAssemblerCodeRef CodeRef;
typedef MacroAssemblerCodePtr CodePtr;
+#else
+ JITCode() { }
+#endif
public:
+ enum JITType { None, HostCallThunk, InterpreterThunk, BaselineJIT, DFGJIT };
+
+ static JITType bottomTierJIT()
+ {
+ return BaselineJIT;
+ }
+
+ static JITType topTierJIT()
+ {
+ return DFGJIT;
+ }
+
+ static JITType nextTierJIT(JITType jitType)
+ {
+ ASSERT_UNUSED(jitType, jitType == BaselineJIT || jitType == DFGJIT);
+ return DFGJIT;
+ }
+
+ static bool isOptimizingJIT(JITType jitType)
+ {
+ return jitType == DFGJIT;
+ }
+
+ static bool isBaselineCode(JITType jitType)
+ {
+ return jitType == InterpreterThunk || jitType == BaselineJIT;
+ }
+
+#if ENABLE(JIT)
JITCode()
+ : m_jitType(None)
{
}
- JITCode(const CodeRef ref)
+ JITCode(const CodeRef ref, JITType jitType)
: m_ref(ref)
+ , m_jitType(jitType)
{
+ ASSERT(jitType != None);
}
-
+
bool operator !() const
{
- return !m_ref.m_code.executableAddress();
+ return !m_ref;
}
CodePtr addressForCall()
{
- return m_ref.m_code;
+ return m_ref.code();
+ }
+
+ void* executableAddressAtOffset(size_t offset) const
+ {
+ ASSERT(offset < size());
+ return reinterpret_cast<char*>(m_ref.code().executableAddress()) + offset;
+ }
+
+ void* dataAddressAtOffset(size_t offset) const
+ {
+ ASSERT(offset < size());
+ return reinterpret_cast<char*>(m_ref.code().dataLocation()) + offset;
}
    // This function returns the offset in bytes of 'pointerIntoCode' into this
    // block of code. It is ASSERTed that no codeblock is >4gb in size.
unsigned offsetOf(void* pointerIntoCode)
{
- intptr_t result = reinterpret_cast<intptr_t>(pointerIntoCode) - reinterpret_cast<intptr_t>(m_ref.m_code.executableAddress());
+ intptr_t result = reinterpret_cast<intptr_t>(pointerIntoCode) - reinterpret_cast<intptr_t>(m_ref.code().executableAddress());
ASSERT(static_cast<intptr_t>(static_cast<unsigned>(result)) == result);
return static_cast<unsigned>(result);
}
// Execute the code!
- inline JSValue execute(RegisterFile* registerFile, CallFrame* callFrame, JSGlobalData* globalData, JSValue* exception)
+ inline JSValue execute(RegisterFile* registerFile, CallFrame* callFrame, JSGlobalData* globalData)
{
- return JSValue::decode(ctiTrampoline(m_ref.m_code.executableAddress(), registerFile, callFrame, exception, Profiler::enabledProfilerReference(), globalData));
+ JSValue result = JSValue::decode(ctiTrampoline(m_ref.code().executableAddress(), registerFile, callFrame, 0, Profiler::enabledProfilerReference(), globalData));
+ return globalData->exception ? jsNull() : result;
}
- void* start()
+ void* start() const
{
- return m_ref.m_code.dataLocation();
+ return m_ref.code().dataLocation();
}
- size_t size()
+ size_t size() const
{
- ASSERT(m_ref.m_code.executableAddress());
- return m_ref.m_size;
+ ASSERT(m_ref.code().executableAddress());
+ return m_ref.size();
}
- ExecutablePool* getExecutablePool()
+ ExecutableMemoryHandle* getExecutableMemory()
+ {
+ return m_ref.executableMemory();
+ }
+
+ JITType jitType()
{
- return m_ref.m_executablePool.get();
+ return m_jitType;
}
    // Host functions are a bit special; they have an m_code pointer but they
    // do not individually ref the executable pool containing the trampoline.
- static JITCode HostFunction(CodePtr code)
+ static JITCode HostFunction(CodeRef code)
+ {
+ return JITCode(code, HostCallThunk);
+ }
+
+ void clear()
{
- return JITCode(code.dataLocation(), 0, 0);
+ m_ref.~CodeRef();
+ new (NotNull, &m_ref) CodeRef();
}
private:
- JITCode(void* code, PassRefPtr<ExecutablePool> executablePool, size_t size)
- : m_ref(code, executablePool, size)
+ JITCode(PassRefPtr<ExecutableMemoryHandle> executableMemory, JITType jitType)
+ : m_ref(executableMemory)
+ , m_jitType(jitType)
{
}
CodeRef m_ref;
+ JITType m_jitType;
+#endif // ENABLE(JIT)
};
};
#endif
-
-#endif