X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/blobdiff_plain/ba379fdc102753d6be2c4d937058fe40257329fe..6fe7ccc865dc7d7541b93c5bcaf6368d2c98a174:/assembler/MacroAssemblerCodeRef.h

diff --git a/assembler/MacroAssemblerCodeRef.h b/assembler/MacroAssemblerCodeRef.h
index 341a7ff..ac62c42 100644
--- a/assembler/MacroAssemblerCodeRef.h
+++ b/assembler/MacroAssemblerCodeRef.h
@@ -26,18 +26,14 @@
 #ifndef MacroAssemblerCodeRef_h
 #define MacroAssemblerCodeRef_h
 
-#include <wtf/Platform.h>
-
 #include "ExecutableAllocator.h"
-#include "PassRefPtr.h"
-#include "RefPtr.h"
-#include "UnusedParam.h"
-
-#if ENABLE(ASSEMBLER)
+#include <wtf/PassRefPtr.h>
+#include <wtf/RefPtr.h>
+#include <wtf/UnusedParam.h>
 
 // ASSERT_VALID_CODE_POINTER checks that ptr is a non-null pointer, and that it is a valid
 // instruction address on the platform (for example, check any alignment requirements).
-#if PLATFORM_ARM_ARCH(7)
+#if CPU(ARM_THUMB2)
 // ARM/thumb instructions must be 16-bit aligned, but all code pointers to be loaded
 // into the processor are decorated with the bottom bit set, indicating that this is
 // thumb code (as oposed to 32-bit traditional ARM). The first test checks for both
@@ -54,6 +50,32 @@
 #define ASSERT_VALID_CODE_OFFSET(offset) // Anything goes!
 #endif
 
+#if CPU(X86) && OS(WINDOWS)
+#define CALLING_CONVENTION_IS_STDCALL 1
+#ifndef CDECL
+#if COMPILER(MSVC)
+#define CDECL __cdecl
+#else
+#define CDECL __attribute__ ((__cdecl))
+#endif // COMPILER(MSVC)
+#endif // CDECL
+#else
+#define CALLING_CONVENTION_IS_STDCALL 0
+#endif
+
+#if CPU(X86)
+#define HAS_FASTCALL_CALLING_CONVENTION 1
+#ifndef FASTCALL
+#if COMPILER(MSVC)
+#define FASTCALL __fastcall
+#else
+#define FASTCALL __attribute__ ((fastcall))
+#endif // COMPILER(MSVC)
+#endif // FASTCALL
+#else
+#define HAS_FASTCALL_CALLING_CONVENTION 0
+#endif // CPU(X86)
+
 namespace JSC {
 
 // FunctionPtr:
@@ -67,9 +89,125 @@ public:
     {
     }
 
+    template<typename returnType>
+    FunctionPtr(returnType(*value)())
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    template<typename returnType, typename argType1>
+    FunctionPtr(returnType(*value)(argType1))
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    template<typename returnType, typename argType1, typename argType2>
+    FunctionPtr(returnType(*value)(argType1, argType2))
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    template<typename returnType, typename argType1, typename argType2, typename argType3>
+    FunctionPtr(returnType(*value)(argType1, argType2, argType3))
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4>
+    FunctionPtr(returnType(*value)(argType1, argType2, argType3, argType4))
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+// MSVC doesn't seem to treat functions with different calling conventions as
+// different types; these methods already defined for fastcall, below.
+#if CALLING_CONVENTION_IS_STDCALL && !OS(WINDOWS)
+
+    template<typename returnType>
+    FunctionPtr(returnType (CDECL *value)())
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    template<typename returnType, typename argType1>
+    FunctionPtr(returnType (CDECL *value)(argType1))
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    template<typename returnType, typename argType1, typename argType2>
+    FunctionPtr(returnType (CDECL *value)(argType1, argType2))
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    template<typename returnType, typename argType1, typename argType2, typename argType3>
+    FunctionPtr(returnType (CDECL *value)(argType1, argType2, argType3))
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4>
+    FunctionPtr(returnType (CDECL *value)(argType1, argType2, argType3, argType4))
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+#endif
+
+#if HAS_FASTCALL_CALLING_CONVENTION
+
+    template<typename returnType>
+    FunctionPtr(returnType (FASTCALL *value)())
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    template<typename returnType, typename argType1>
+    FunctionPtr(returnType (FASTCALL *value)(argType1))
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    template<typename returnType, typename argType1, typename argType2>
+    FunctionPtr(returnType (FASTCALL *value)(argType1, argType2))
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    template<typename returnType, typename argType1, typename argType2, typename argType3>
+    FunctionPtr(returnType (FASTCALL *value)(argType1, argType2, argType3))
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4>
+    FunctionPtr(returnType (FASTCALL *value)(argType1, argType2, argType3, argType4))
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+#endif
+
     template<typename FunctionType>
     explicit FunctionPtr(FunctionType* value)
-        : m_value(reinterpret_cast<void*>(value))
+        // Using a C-ctyle cast here to avoid compiler error on RVTC:
+        // Error: #694: reinterpret_cast cannot cast away const or other type qualifiers
+        // (I guess on RVTC function pointers have a different constness to GCC/MSVC?)
+        : m_value((void*)value)
     {
         ASSERT_VALID_CODE_POINTER(m_value);
     }
@@ -124,7 +262,7 @@ public:
     }
 
     explicit MacroAssemblerCodePtr(void* value)
-#if PLATFORM_ARM_ARCH(7)
+#if CPU(ARM_THUMB2)
         // Decorate the pointer as a thumb code pointer.
         : m_value(reinterpret_cast<char*>(value) + 1)
 #else
@@ -133,7 +271,19 @@
     {
         ASSERT_VALID_CODE_POINTER(m_value);
     }
+
+    static MacroAssemblerCodePtr createFromExecutableAddress(void* value)
+    {
+        ASSERT_VALID_CODE_POINTER(value);
+        MacroAssemblerCodePtr result;
+        result.m_value = value;
+        return result;
+    }
+    static MacroAssemblerCodePtr createLLIntCodePtr(void (*function)())
+    {
+        return createFromExecutableAddress(bitwise_cast<void*>(function));
+    }
 
     explicit MacroAssemblerCodePtr(ReturnAddressPtr ra)
         : m_value(ra.value())
     {
@@ -141,14 +291,14 @@
     }
 
     void* executableAddress() const { return m_value; }
-#if PLATFORM_ARM_ARCH(7)
+#if CPU(ARM_THUMB2)
     // To use this pointer as a data address remove the decoration.
    void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return reinterpret_cast<char*>(m_value) - 1; }
 #else
    void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return m_value; }
 #endif
 
-    bool operator!()
+    bool operator!() const
     {
         return !m_value;
     }
@@ -163,26 +313,67 @@ private:
 // pointer to the code, and a ref pointer to the pool from within which it
 // was allocated.
 class MacroAssemblerCodeRef {
+private:
+    // This is private because it's dangerous enough that we want uses of it
+    // to be easy to find - hence the static create method below.
+    explicit MacroAssemblerCodeRef(MacroAssemblerCodePtr codePtr)
+        : m_codePtr(codePtr)
+    {
+        ASSERT(m_codePtr);
+    }
+
 public:
     MacroAssemblerCodeRef()
-        : m_size(0)
     {
     }
 
-    MacroAssemblerCodeRef(void* code, PassRefPtr<ExecutablePool> executablePool, size_t size)
-        : m_code(code)
-        , m_executablePool(executablePool)
-        , m_size(size)
+    MacroAssemblerCodeRef(PassRefPtr<ExecutableMemoryHandle> executableMemory)
+        : m_codePtr(executableMemory->start())
+        , m_executableMemory(executableMemory)
     {
+        ASSERT(m_executableMemory->isManaged());
+        ASSERT(m_executableMemory->start());
+        ASSERT(m_codePtr);
     }
+
+    // Use this only when you know that the codePtr refers to code that is
+    // already being kept alive through some other means. Typically this means
+    // that codePtr is immortal.
+    static MacroAssemblerCodeRef createSelfManagedCodeRef(MacroAssemblerCodePtr codePtr)
+    {
+        return MacroAssemblerCodeRef(codePtr);
+    }
+
+    // Helper for creating self-managed code refs from LLInt.
+    static MacroAssemblerCodeRef createLLIntCodeRef(void (*function)())
+    {
+        return createSelfManagedCodeRef(MacroAssemblerCodePtr::createFromExecutableAddress(bitwise_cast<void*>(function)));
+    }
+
+    ExecutableMemoryHandle* executableMemory() const
+    {
+        return m_executableMemory.get();
+    }
+
+    MacroAssemblerCodePtr code() const
+    {
+        return m_codePtr;
+    }
+
+    size_t size() const
+    {
+        if (!m_executableMemory)
+            return 0;
+        return m_executableMemory->sizeInBytes();
+    }
+
+    bool operator!() const { return !m_codePtr; }
 
-    MacroAssemblerCodePtr m_code;
-    RefPtr<ExecutablePool> m_executablePool;
-    size_t m_size;
+private:
+    MacroAssemblerCodePtr m_codePtr;
+    RefPtr<ExecutableMemoryHandle> m_executableMemory;
 };
 
 } // namespace JSC
 
-#endif // ENABLE(ASSEMBLER)
-
 #endif // MacroAssemblerCodeRef_h
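
A note on the CPU(ARM_THUMB2) paths touched above: on Thumb-2, every code pointer handed to the processor carries the bottom bit set to select Thumb mode, which is why MacroAssemblerCodePtr stores the decorated value (returned by executableAddress()), strips the bit again in dataLocation(), and why ASSERT_VALID_CODE_POINTER checks both that the pointer is non-null ignoring the decoration and that the decoration is actually present. The self-contained sketch below mirrors that logic outside of JavaScriptCore; ToyCodePtr and the static buffer are illustrative stand-ins, not the real class.

// Illustrative sketch only: mirrors the Thumb-2 decoration described above.
// ToyCodePtr is a stand-in, not the real JSC MacroAssemblerCodePtr.
#include <cassert>
#include <cstdint>
#include <cstdio>

class ToyCodePtr {
public:
    explicit ToyCodePtr(void* value)
        // Decorate the pointer: bit 0 set tells the CPU to execute as Thumb.
        : m_value(reinterpret_cast<char*>(value) + 1)
    {
        // Same two checks as ASSERT_VALID_CODE_POINTER on CPU(ARM_THUMB2):
        // non-null even ignoring the decoration, and actually decorated.
        assert(reinterpret_cast<std::intptr_t>(m_value) & ~1);
        assert(reinterpret_cast<std::intptr_t>(m_value) & 1);
    }

    void* executableAddress() const { return m_value; }                      // decorated, for jumps/calls
    void* dataLocation() const { return static_cast<char*>(m_value) - 1; }   // undecorated, for reads/writes

private:
    void* m_value;
};

int main()
{
    alignas(4) static unsigned char buffer[16] = { 0 }; // pretend this holds JIT-generated Thumb code
    ToyCodePtr ptr(buffer);
    std::printf("data %p, executable %p\n", ptr.dataLocation(), ptr.executableAddress());
    assert(ptr.dataLocation() == buffer);
    return 0;
}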
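
The block of FunctionPtr constructors added in the third hunk type-erases typed C function pointers into a single void* while still requiring, at compile time, that the argument really is a function pointer of the expected arity; the separate CDECL and FASTCALL overload sets are needed because, outside of MSVC, pointers to functions with different calling conventions are distinct types (hence the CALLING_CONVENTION_IS_STDCALL and HAS_FASTCALL_CALLING_CONVENTION macros defined in the second hunk). A compressed sketch of the same pattern follows, using a hypothetical ToyFunctionPtr with only the default-convention overloads.

// Sketch of the type-erasing constructor pattern used by FunctionPtr.
// ToyFunctionPtr is illustrative; the real class repeats the overload set
// for CDECL and FASTCALL pointers and for up to four arguments.
#include <cassert>
#include <cstdio>

class ToyFunctionPtr {
public:
    ToyFunctionPtr() : m_value(nullptr) { }

    template<typename ReturnType>
    ToyFunctionPtr(ReturnType (*value)())
        : m_value((void*)value) // C-style cast, as in the header, to sidestep
    {                           // reinterpret_cast qualifier complaints on some toolchains
        assert(m_value);
    }

    template<typename ReturnType, typename ArgType1>
    ToyFunctionPtr(ReturnType (*value)(ArgType1))
        : m_value((void*)value)
    {
        assert(m_value);
    }

    void* value() const { return m_value; }

private:
    void* m_value;
};

static int answer() { return 42; }
static double twice(double x) { return 2 * x; }

int main()
{
    ToyFunctionPtr p0(answer); // matches the zero-argument overload
    ToyFunctionPtr p1(twice);  // matches the one-argument overload
    std::printf("%p %p\n", p0.value(), p1.value());
    return 0;
}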
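
The last hunk replaces MacroAssemblerCodeRef's public (code, ExecutablePool, size) triple with a code pointer plus a ref-counted ExecutableMemoryHandle whose sizeInBytes() backs size(), and adds a private constructor reachable only through createSelfManagedCodeRef() for code whose lifetime is not managed by the allocator, such as the statically linked LLInt entry points wrapped by createLLIntCodeRef(). The sketch below reproduces that two-mode ownership pattern with hypothetical ToyMemoryHandle and ToyCodeRef types standing in for the real ExecutableAllocator API.

// Sketch of the two ownership modes in the new MacroAssemblerCodeRef:
// (1) a ref-counted handle that keeps JIT memory alive, and (2) a
// "self-managed" pointer to immortal code such as an LLInt opcode.
// ToyMemoryHandle and ToyCodeRef are hypothetical stand-ins.
#include <cassert>
#include <cstddef>
#include <memory>

struct ToyMemoryHandle {        // stand-in for ExecutableMemoryHandle
    void* start;
    std::size_t sizeInBytes;
};

class ToyCodeRef {
public:
    ToyCodeRef() : m_code(nullptr) { }

    // Managed mode: the ref shares ownership of the memory handle.
    explicit ToyCodeRef(std::shared_ptr<ToyMemoryHandle> memory)
        : m_code(memory->start)
        , m_memory(std::move(memory))
    {
        assert(m_code);
    }

    // Self-managed mode: the caller guarantees the code outlives the ref,
    // e.g. a statically linked interpreter entry point.
    static ToyCodeRef createSelfManagedCodeRef(void* code)
    {
        ToyCodeRef ref;
        ref.m_code = code;
        return ref;
    }

    void* code() const { return m_code; }
    std::size_t size() const { return m_memory ? m_memory->sizeInBytes : 0; } // unknown when self-managed

private:
    void* m_code;
    std::shared_ptr<ToyMemoryHandle> m_memory; // null in self-managed mode
};

static void llint_op_enter() { } // pretend LLInt opcode entry point

int main()
{
    static char jitBuffer[64]; // pretend this came from the executable allocator
    auto handle = std::make_shared<ToyMemoryHandle>(ToyMemoryHandle{ jitBuffer, sizeof(jitBuffer) });
    ToyCodeRef managed(handle);
    ToyCodeRef immortal = ToyCodeRef::createSelfManagedCodeRef(reinterpret_cast<void*>(&llint_op_enter));
    assert(managed.size() == sizeof(jitBuffer));
    assert(immortal.size() == 0);
    assert(immortal.code() != nullptr);
    return 0;
}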