diff --git a/jit/ExecutableAllocator.h b/jit/ExecutableAllocator.h
index 62d708d8e69d8c8b47099b1e83c9a36a14583372..eba096848482587c4595912e52269786f65669fe 100644
--- a/jit/ExecutableAllocator.h
+++ b/jit/ExecutableAllocator.h
 
 #ifndef ExecutableAllocator_h
 #define ExecutableAllocator_h
-
+#include "JITCompilationEffort.h"
 #include <stddef.h> // for ptrdiff_t
 #include <limits>
 #include <wtf/Assertions.h>
+#include <wtf/MetaAllocatorHandle.h>
+#include <wtf/MetaAllocator.h>
+#include <wtf/PageAllocation.h>
 #include <wtf/PassRefPtr.h>
 #include <wtf/RefCounted.h>
-#include <wtf/UnusedParam.h>
 #include <wtf/Vector.h>
 
-#if OS(IPHONE_OS)
+#if OS(IOS)
 #include <libkern/OSCacheControl.h>
-#include <sys/mman.h>
 #endif
 
-#if OS(SYMBIAN)
-#include <e32std.h>
+#if OS(IOS) || OS(QNX)
+#include <sys/mman.h>
 #endif
 
 #if CPU(MIPS) && OS(LINUX)
 #include <sys/cachectl.h>
 #endif
 
+#if CPU(SH4) && OS(LINUX)
+#include <asm/cachectl.h>
+#include <asm/unistd.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+#endif
+
 #if OS(WINCE)
 // From pkfuncs.h (private header file from the Platform Builder)
 #define CACHE_SYNC_ALL 0x07F
 extern "C" __declspec(dllimport) void CacheRangeFlush(LPVOID pAddr, DWORD dwLength, DWORD dwFlags);
 #endif
 
-#define JIT_ALLOCATOR_PAGE_SIZE (ExecutableAllocator::pageSize)
-#define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (ExecutableAllocator::pageSize * 4)
+#define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (pageSize() * 4)
 
 #if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
 #define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE)
 #define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC)
-#define INITIAL_PROTECTION_FLAGS PROTECTION_FLAGS_RX
+#define EXECUTABLE_POOL_WRITABLE false
 #else
-#define INITIAL_PROTECTION_FLAGS (PROT_READ | PROT_WRITE | PROT_EXEC)
+#define EXECUTABLE_POOL_WRITABLE true
 #endif
 
 namespace JSC {
 
+class VM;
+void releaseExecutableMemory(VM&);
+
+static const unsigned jitAllocationGranule = 32;
+
 inline size_t roundUpAllocationSize(size_t request, size_t granularity)
 {
-    if ((std::numeric_limits<size_t>::max() - granularity) <= request)
-        CRASH(); // Allocation is too large
+    RELEASE_ASSERT((std::numeric_limits<size_t>::max() - granularity) > request);
     
     // Round up to next page boundary
     size_t size = request + (granularity - 1);
@@ -80,116 +91,50 @@ inline size_t roundUpAllocationSize(size_t request, size_t granularity)
 
 }
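
The rounding above only works because the granularity is a power of two, which holds for every caller in this header (page sizes, the 32-byte jitAllocationGranule, and sizeof(void*) in the old pool code below). A minimal, self-contained illustration of the same add-then-mask trick; the masking step itself sits in the part of the function elided by the hunk above:

#include <cassert>
#include <cstddef>

// Same rounding as roundUpAllocationSize, minus the overflow RELEASE_ASSERT;
// only valid when granularity is a power of two.
inline size_t roundUpExample(size_t request, size_t granularity)
{
    size_t size = request + (granularity - 1); // step past the boundary below the target
    size &= ~(granularity - 1);                // clear the low bits to snap onto the boundary
    return size;
}

int main()
{
    assert(roundUpExample(1, 4096) == 4096);    // small request -> one page
    assert(roundUpExample(4096, 4096) == 4096); // already aligned -> unchanged
    assert(roundUpExample(4097, 4096) == 8192); // just over -> next page
    return 0;
}
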
 
-#if ENABLE(ASSEMBLER)
-
 namespace JSC {
 
-class ExecutablePool : public RefCounted<ExecutablePool> {
-private:
-    struct Allocation {
-        char* pages;
-        size_t size;
-#if OS(SYMBIAN)
-        RChunk* chunk;
-#endif
-    };
-    typedef Vector<Allocation, 2> AllocationList;
-
-public:
-    static PassRefPtr<ExecutablePool> create(size_t n)
-    {
-        return adoptRef(new ExecutablePool(n));
-    }
-
-    void* alloc(size_t n)
-    {
-        ASSERT(m_freePtr <= m_end);
-
-        // Round 'n' up to a multiple of word size; if all allocations are of
-        // word sized quantities, then all subsequent allocations will be aligned.
-        n = roundUpAllocationSize(n, sizeof(void*));
-
-        if (static_cast<ptrdiff_t>(n) < (m_end - m_freePtr)) {
-            void* result = m_freePtr;
-            m_freePtr += n;
-            return result;
-        }
-
-        // Insufficient space to allocate in the existing pool
-        // so we need allocate into a new pool
-        return poolAllocate(n);
-    }
-    
-    void tryShrink(void* allocation, size_t oldSize, size_t newSize)
-    {
-        if (static_cast<char*>(allocation) + oldSize != m_freePtr)
-            return;
-        m_freePtr = static_cast<char*>(allocation) + roundUpAllocationSize(newSize, sizeof(void*));
-    }
-
-    ~ExecutablePool()
-    {
-        AllocationList::const_iterator end = m_pools.end();
-        for (AllocationList::const_iterator ptr = m_pools.begin(); ptr != end; ++ptr)
-            ExecutablePool::systemRelease(*ptr);
-    }
+typedef WTF::MetaAllocatorHandle ExecutableMemoryHandle;
 
-    size_t available() const { return (m_pools.size() > 1) ? 0 : m_end - m_freePtr; }
-
-    static bool underMemoryPressure();
-
-private:
-    static Allocation systemAlloc(size_t n);
-    static void systemRelease(const Allocation& alloc);
+#if ENABLE(ASSEMBLER)
 
-    ExecutablePool(size_t n);
+#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
+class DemandExecutableAllocator;
+#endif
 
-    void* poolAllocate(size_t n);
+#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED)
+#if CPU(ARM) || CPU(ARM64)
+static const size_t fixedExecutableMemoryPoolSize = 16 * 1024 * 1024;
+#elif CPU(X86_64)
+static const size_t fixedExecutableMemoryPoolSize = 1024 * 1024 * 1024;
+#else
+static const size_t fixedExecutableMemoryPoolSize = 32 * 1024 * 1024;
+#endif
 
-    char* m_freePtr;
-    char* m_end;
-    AllocationList m_pools;
-};
+extern uintptr_t startOfFixedExecutableMemoryPool;
+#endif
 
 class ExecutableAllocator {
-    enum ProtectionSeting { Writable, Executable };
+    enum ProtectionSetting { Writable, Executable };
 
 public:
-    static size_t pageSize;
-    ExecutableAllocator()
-    {
-        if (!pageSize)
-            intializePageSize();
-        if (isValid())
-            m_smallAllocationPool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
-#if !ENABLE(INTERPRETER)
-        else
-            CRASH();
-#endif
-    }
+    ExecutableAllocator(VM&);
+    ~ExecutableAllocator();
+    
+    static void initializeAllocator();
 
     bool isValid() const;
+
+    static bool underMemoryPressure();
     
-    PassRefPtr<ExecutablePool> poolForSize(size_t n)
-    {
-        // Try to fit in the existing small allocator
-        ASSERT(m_smallAllocationPool);
-        if (n < m_smallAllocationPool->available())
-            return m_smallAllocationPool;
-
-        // If the request is large, we just provide a unshared allocator
-        if (n > JIT_ALLOCATOR_LARGE_ALLOC_SIZE)
-            return ExecutablePool::create(n);
-
-        // Create a new allocator
-        RefPtr<ExecutablePool> pool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
-
-        // If the new allocator will result in more free space than in
-        // the current small allocator, then we will use it instead
-        if ((pool->available() - n) > m_smallAllocationPool->available())
-            m_smallAllocationPool = pool;
-        return pool.release();
-    }
+    static double memoryPressureMultiplier(size_t addedMemoryUsage);
+    
+#if ENABLE(META_ALLOCATOR_PROFILE)
+    static void dumpProfile();
+#else
+    static void dumpProfile() { }
+#endif
+
+    PassRefPtr<ExecutableMemoryHandle> allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort);
 
 #if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
     static void makeWritable(void* start, size_t size)
@@ -206,133 +151,35 @@ public:
     static void makeExecutable(void*, size_t) {}
 #endif
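
Under ENABLE(ASSEMBLER_WX_EXCLUSIVE) the pool pages start out read-execute (EXECUTABLE_POOL_WRITABLE is false above), and makeWritable/makeExecutable bracket writes to JIT memory by calling reprotectRegion. A rough sketch of what such a reprotect amounts to on a POSIX mmap-based port, assuming the WTF pageSize() helper already used in this header; the real definition lives in ExecutableAllocator.cpp:

#include <stdint.h>
#include <sys/mman.h>

// Sketch only: flip a region between the PROTECTION_FLAGS_RW and
// PROTECTION_FLAGS_RX masks defined at the top of this header.
static void reprotectRegionSketch(void* start, size_t size, bool writable)
{
    // mprotect wants a page-aligned start, so round down and widen the length to match.
    uintptr_t pageMask = ~(static_cast<uintptr_t>(pageSize()) - 1);
    uintptr_t begin = reinterpret_cast<uintptr_t>(start) & pageMask;
    size_t length = reinterpret_cast<uintptr_t>(start) + size - begin;

    if (mprotect(reinterpret_cast<void*>(begin), length, writable ? PROTECTION_FLAGS_RW : PROTECTION_FLAGS_RX))
        CRASH(); // losing the ability to reprotect would leave the JIT pool unusable
}
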
 
-
-#if CPU(X86) || CPU(X86_64)
-    static void cacheFlush(void*, size_t)
-    {
-    }
-#elif CPU(MIPS)
-    static void cacheFlush(void* code, size_t size)
-    {
-#if COMPILER(GCC) && (GCC_VERSION >= 40300)
-#if WTF_MIPS_ISA_REV(2) && (GCC_VERSION < 40403)
-        int lineSize;
-        asm("rdhwr %0, $1" : "=r" (lineSize));
-        //
-        // Modify "start" and "end" to avoid GCC 4.3.0-4.4.2 bug in
-        // mips_expand_synci_loop that may execute synci one more time.
-        // "start" points to the fisrt byte of the cache line.
-        // "end" points to the last byte of the line before the last cache line.
-        // Because size is always a multiple of 4, this is safe to set
-        // "end" to the last byte.
-        //
-        intptr_t start = reinterpret_cast<intptr_t>(code) & (-lineSize);
-        intptr_t end = ((reinterpret_cast<intptr_t>(code) + size - 1) & (-lineSize)) - 1;
-        __builtin___clear_cache(reinterpret_cast<char*>(start), reinterpret_cast<char*>(end));
-#else
-        intptr_t end = reinterpret_cast<intptr_t>(code) + size;
-        __builtin___clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(end));
-#endif
-#else
-        _flush_cache(reinterpret_cast<char*>(code), size, BCACHE);
-#endif
-    }
-#elif CPU(ARM_THUMB2) && OS(IPHONE_OS)
-    static void cacheFlush(void* code, size_t size)
-    {
-        sys_dcache_flush(code, size);
-        sys_icache_invalidate(code, size);
-    }
-#elif CPU(ARM_THUMB2) && OS(LINUX)
-    static void cacheFlush(void* code, size_t size)
-    {
-        asm volatile (
-            "push    {r7}\n"
-            "mov     r0, %0\n"
-            "mov     r1, %1\n"
-            "movw    r7, #0x2\n"
-            "movt    r7, #0xf\n"
-            "movs    r2, #0x0\n"
-            "svc     0x0\n"
-            "pop     {r7}\n"
-            :
-            : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
-            : "r0", "r1", "r2");
-    }
-#elif OS(SYMBIAN)
-    static void cacheFlush(void* code, size_t size)
-    {
-        User::IMB_Range(code, static_cast<char*>(code) + size);
-    }
-#elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(RVCT)
-    static __asm void cacheFlush(void* code, size_t size);
-#elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(GCC)
-    static void cacheFlush(void* code, size_t size)
-    {
-        asm volatile (
-            "push    {r7}\n"
-            "mov     r0, %0\n"
-            "mov     r1, %1\n"
-            "mov     r7, #0xf0000\n"
-            "add     r7, r7, #0x2\n"
-            "mov     r2, #0x0\n"
-            "svc     0x0\n"
-            "pop     {r7}\n"
-            :
-            : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
-            : "r0", "r1", "r2");
-    }
-#elif OS(WINCE)
-    static void cacheFlush(void* code, size_t size)
-    {
-        CacheRangeFlush(code, size, CACHE_SYNC_ALL);
-    }
-#else
-    #error "The cacheFlush support is missing on this platform."
-#endif
+    static size_t committedByteCount();
 
 private:
 
 #if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
-    static void reprotectRegion(void*, size_t, ProtectionSeting);
+    static void reprotectRegion(void*, size_t, ProtectionSetting);
+#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
+    // We create a MetaAllocator for each JS global object.
+    OwnPtr<DemandExecutableAllocator> m_allocator;
+    DemandExecutableAllocator* allocator() { return m_allocator.get(); }
+#endif
 #endif
 
-    RefPtr<ExecutablePool> m_smallAllocationPool;
-    static void intializePageSize();
 };
 
-inline ExecutablePool::ExecutablePool(size_t n)
-{
-    size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
-    Allocation mem = systemAlloc(allocSize);
-    m_pools.append(mem);
-    m_freePtr = mem.pages;
-    if (!m_freePtr)
-        CRASH(); // Failed to allocate
-    m_end = m_freePtr + allocSize;
-}
 
-inline void* ExecutablePool::poolAllocate(size_t n)
-{
-    size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
-    
-    Allocation result = systemAlloc(allocSize);
-    if (!result.pages)
-        CRASH(); // Failed to allocate
-    
-    ASSERT(m_end >= m_freePtr);
-    if ((allocSize - n) > static_cast<size_t>(m_end - m_freePtr)) {
-        // Replace allocation pool
-        m_freePtr = result.pages + n;
-        m_end = result.pages + allocSize;
-    }
+#else
 
-    m_pools.append(result);
-    return result.pages;
-}
+#if PLATFORM(IOS)
 
-}
+class ExecutableAllocator {
+public: 
+    static size_t committedByteCount();
+};
+
+#endif // !PLATFORM(IOS)
+
+#endif // ENABLE(JIT) && ENABLE(ASSEMBLER)
 
-#endif // ENABLE(ASSEMBLER)
+} // namespace JSC
 
 #endif // !defined(ExecutableAllocator)
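
Taken together, the patch retires the inline ExecutablePool bump allocator in favour of WTF::MetaAllocator-backed handles. A hypothetical caller-side sketch of the new interface follows; the vm.executableAllocator member and the handle's start() accessor are assumptions for illustration, only allocate() itself is declared in this header:

// Hypothetical caller, not part of this patch: how a JIT might request code memory.
static RefPtr<ExecutableMemoryHandle> allocateCodeSketch(VM& vm, size_t sizeInBytes, void* ownerUID)
{
    RefPtr<ExecutableMemoryHandle> handle =
        vm.executableAllocator.allocate(vm, sizeInBytes, ownerUID, JITCompilationCanFail);
    if (!handle)
        return 0; // with JITCompilationCanFail the allocator may refuse under memory pressure
    // handle->start() is the executable region; it is returned to the pool when the last
    // reference to the handle dies, so the caller keeps the handle alive as long as the code.
    return handle;
}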