diff --git a/jit/ExecutableAllocatorFixedVMPool.cpp b/jit/ExecutableAllocatorFixedVMPool.cpp
index 792e931497faaf73e1cee7e3077beaece67bd853..9afb055d47a0545d833bdadfcdb24acbae6bb1a7 100644
--- a/jit/ExecutableAllocatorFixedVMPool.cpp
+++ b/jit/ExecutableAllocatorFixedVMPool.cpp
  */
 
 #include "config.h"
-
 #include "ExecutableAllocator.h"
 
+#include "JSCInlines.h"
+
 #if ENABLE(EXECUTABLE_ALLOCATOR_FIXED)
 
+#include "CodeProfiling.h"
 #include <errno.h>
-
-#include "TCSpinLock.h"
-#include <sys/mman.h>
 #include <unistd.h>
-#include <wtf/AVLTree.h>
+#include <wtf/MetaAllocator.h>
 #include <wtf/PageReservation.h>
 #include <wtf/VMTags.h>
 
+#if OS(DARWIN)
+#include <sys/mman.h>
+#endif
+
 #if OS(LINUX)
 #include <stdio.h>
 #endif
 
-    #define MMAP_FLAGS (MAP_PRIVATE | MAP_ANON | MAP_JIT)
+#if PLATFORM(MAC) && __MAC_OS_X_VERSION_MIN_REQUIRED < 1090
+// MADV_FREE_REUSABLE does not work for JIT memory on older OSes so use MADV_FREE in that case.
+#define WTF_USE_MADV_FREE_FOR_JIT_MEMORY 1
+#endif
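
// Reviewer sketch (not part of the patch): what the MADV_FREE fallback
// does. madvise(MADV_FREE) tells the kernel a page's contents are
// disposable while leaving the mapping itself intact, so the range can be
// reused later without a fresh commit. A minimal standalone illustration,
// assuming a POSIX system where MADV_FREE is available:

#include <sys/mman.h>
#include <unistd.h>
#include <cerrno>

int main()
{
    size_t pageSize = sysconf(_SC_PAGESIZE);
    void* page = mmap(nullptr, pageSize, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANON, -1, 0);
    if (page == MAP_FAILED)
        return 1;
    // Retry on EAGAIN, mirroring the loop in notifyPageIsFree() below.
    while (madvise(page, pageSize, MADV_FREE) == -1 && errno == EAGAIN) { }
    munmap(page, pageSize);
    return 0;
}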
 
 using namespace WTF;
 
 namespace JSC {
     
-#define TwoPow(n) (1ull << n)
+uintptr_t startOfFixedExecutableMemoryPool;
 
-class AllocationTableSizeClass {
+class FixedVMPoolExecutableAllocator : public MetaAllocator {
+    WTF_MAKE_FAST_ALLOCATED;
 public:
-    AllocationTableSizeClass(size_t size, size_t blockSize, unsigned log2BlockSize)
-        : m_blockSize(blockSize)
-    {
-        ASSERT(blockSize == TwoPow(log2BlockSize));
-
-        // Calculate the number of blocks needed to hold size.
-        size_t blockMask = blockSize - 1;
-        m_blockCount = (size + blockMask) >> log2BlockSize;
-
-        // Align to the smallest power of two >= m_blockCount.
-        m_blockAlignment = 1;
-        while (m_blockAlignment < m_blockCount)
-            m_blockAlignment += m_blockAlignment;
-    }
-
-    size_t blockSize() const { return m_blockSize; }
-    size_t blockCount() const { return m_blockCount; }
-    size_t blockAlignment() const { return m_blockAlignment; }
-
-    size_t size()
-    {
-        return m_blockSize * m_blockCount;
-    }
-
-private:
-    size_t m_blockSize;
-    size_t m_blockCount;
-    size_t m_blockAlignment;
-};
-
-template<unsigned log2Entries>
-class AllocationTableLeaf {
-    typedef uint64_t BitField;
-
-public:
-    static const unsigned log2SubregionSize = 12; // 2^12 == pagesize
-    static const unsigned log2RegionSize = log2SubregionSize + log2Entries;
-
-    static const size_t subregionSize = TwoPow(log2SubregionSize);
-    static const size_t regionSize = TwoPow(log2RegionSize);
-    static const unsigned entries = TwoPow(log2Entries);
-    COMPILE_ASSERT(entries <= (sizeof(BitField) * 8), AllocationTableLeaf_entries_fit_in_BitField);
-
-    AllocationTableLeaf()
-        : m_allocated(0)
-    {
-    }
-
-    ~AllocationTableLeaf()
-    {
-        ASSERT(isEmpty());
-    }
-
-    size_t allocate(AllocationTableSizeClass& sizeClass)
-    {
-        ASSERT(sizeClass.blockSize() == subregionSize);
-        ASSERT(!isFull());
-
-        size_t alignment = sizeClass.blockAlignment();
-        size_t count = sizeClass.blockCount();
-        // Use this mask to check for spans of free blocks.
-        BitField mask = ((1ull << count) - 1) << (alignment - count);
-
-        // Step in units of alignment size.
-        for (unsigned i = 0; i < entries; i += alignment) {
-            if (!(m_allocated & mask)) {
-                m_allocated |= mask;
-                return (i + (alignment - count)) << log2SubregionSize;
-            }
-            mask <<= alignment;
+    FixedVMPoolExecutableAllocator()
+        : MetaAllocator(jitAllocationGranule) // round up all allocations to 32 bytes
+    {
+        m_reservation = PageReservation::reserveWithGuardPages(fixedExecutableMemoryPoolSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
+        if (m_reservation) {
+            ASSERT(m_reservation.size() == fixedExecutableMemoryPoolSize);
+            addFreshFreeSpace(m_reservation.base(), m_reservation.size());
+            
+            startOfFixedExecutableMemoryPool = reinterpret_cast<uintptr_t>(m_reservation.base());
         }
-        return notFound;
-    }
-
-    void free(size_t location, AllocationTableSizeClass& sizeClass)
-    {
-        ASSERT(sizeClass.blockSize() == subregionSize);
-
-        size_t entry = location >> log2SubregionSize;
-        size_t count = sizeClass.blockCount();
-        BitField mask = ((1ull << count) - 1) << entry;
-
-        ASSERT((m_allocated & mask) == mask);
-        m_allocated &= ~mask;
-    }
-
-    bool isEmpty()
-    {
-        return !m_allocated;
-    }
-
-    bool isFull()
-    {
-        return !~m_allocated;
-    }
-
-    static size_t size()
-    {
-        return regionSize;
     }
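
// Reviewer sketch (assumptions, not patch code): the reserve-then-commit
// pattern behind PageReservation::reserveWithGuardPages(), expressed with
// raw POSIX calls. Reserving maps the whole pool with no access rights,
// so it consumes address space but no physical memory; committing a page
// later is just a protection flip.

#include <sys/mman.h>
#include <cstddef>

static void* reservePool(size_t bytes)
{
    // PROT_NONE: reserved but inaccessible until committed.
    void* base = mmap(nullptr, bytes, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
    return base == MAP_FAILED ? nullptr : base;
}

static bool commitPage(void* page, size_t pageSize)
{
    // An EXECUTABLE_POOL_WRITABLE pool commits pages read/write/execute.
    return !mprotect(page, pageSize, PROT_READ | PROT_WRITE | PROT_EXEC);
}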
 
-    static AllocationTableSizeClass classForSize(size_t size)
-    {
-        return AllocationTableSizeClass(size, subregionSize, log2SubregionSize);
-    }
-
-#ifndef NDEBUG
-    void dump(size_t parentOffset = 0, unsigned indent = 0)
-    {
-        for (unsigned i = 0; i < indent; ++i)
-            fprintf(stderr, "    ");
-        fprintf(stderr, "%08x: [%016llx]\n", (int)parentOffset, m_allocated);
-    }
-#endif
-
-private:
-    BitField m_allocated;
-};
-
-
-template<class NextLevel>
-class LazyAllocationTable {
-public:
-    static const unsigned log2RegionSize = NextLevel::log2RegionSize;
-    static const unsigned entries = NextLevel::entries;
-
-    LazyAllocationTable()
-        : m_ptr(0)
-    {
-    }
-
-    ~LazyAllocationTable()
-    {
-        ASSERT(isEmpty());
-    }
-
-    size_t allocate(AllocationTableSizeClass& sizeClass)
-    {
-        if (!m_ptr)
-            m_ptr = new NextLevel();
-        return m_ptr->allocate(sizeClass);
-    }
-
-    void free(size_t location, AllocationTableSizeClass& sizeClass)
-    {
-        ASSERT(m_ptr);
-        m_ptr->free(location, sizeClass);
-        if (m_ptr->isEmpty()) {
-            delete m_ptr;
-            m_ptr = 0;
-        }
-    }
-
-    bool isEmpty()
-    {
-        return !m_ptr;
-    }
-
-    bool isFull()
-    {
-        return m_ptr && m_ptr->isFull();
-    }
-
-    static size_t size()
+    virtual ~FixedVMPoolExecutableAllocator();
+    
+protected:
+    virtual void* allocateNewSpace(size_t&) override
     {
-        return NextLevel::size();
+        // We're operating in a fixed pool, so new allocation is always prohibited.
+        return 0;
     }
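
// Reviewer note: MetaAllocator calls allocateNewSpace() when its free list
// cannot satisfy a request. Returning 0 here is what makes the pool
// "fixed": the only memory the allocator will ever hand out is the
// reservation added via addFreshFreeSpace() in the constructor, and
// exhaustion surfaces as a null handle from allocate() below.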
-
-#ifndef NDEBUG
-    void dump(size_t parentOffset = 0, unsigned indent = 0)
+    
+    virtual void notifyNeedPage(void* page) override
     {
-        ASSERT(m_ptr);
-        m_ptr->dump(parentOffset, indent);
-    }
+#if USE(MADV_FREE_FOR_JIT_MEMORY)
+        UNUSED_PARAM(page);
+#else
+        m_reservation.commit(page, pageSize());
 #endif
-
-    static AllocationTableSizeClass classForSize(size_t size)
-    {
-        return NextLevel::classForSize(size);
-    }
-
-private:
-    NextLevel* m_ptr;
-};
-
-template<class NextLevel, unsigned log2Entries>
-class AllocationTableDirectory {
-    typedef uint64_t BitField;
-
-public:
-    static const unsigned log2SubregionSize = NextLevel::log2RegionSize;
-    static const unsigned log2RegionSize = log2SubregionSize + log2Entries;
-
-    static const size_t subregionSize = TwoPow(log2SubregionSize);
-    static const size_t regionSize = TwoPow(log2RegionSize);
-    static const unsigned entries = TwoPow(log2Entries);
-    COMPILE_ASSERT(entries <= (sizeof(BitField) * 8), AllocationTableDirectory_entries_fit_in_BitField);
-
-    AllocationTableDirectory()
-        : m_full(0)
-        , m_hasSuballocation(0)
-    {
     }
-
-    ~AllocationTableDirectory()
-    {
-        ASSERT(isEmpty());
-    }
-
-    size_t allocate(AllocationTableSizeClass& sizeClass)
-    {
-        ASSERT(sizeClass.blockSize() <= subregionSize);
-        ASSERT(!isFull());
-
-        if (sizeClass.blockSize() < subregionSize) {
-            BitField bit = 1;
-            for (unsigned i = 0; i < entries; ++i, bit += bit) {
-                if (m_full & bit)
-                    continue;
-                size_t location = m_suballocations[i].allocate(sizeClass);
-                if (location != notFound) {
-                    // If this didn't already have a subregion, it does now!
-                    m_hasSuballocation |= bit;
-                    // Mirror the suballocation's full bit.
-                    if (m_suballocations[i].isFull())
-                        m_full |= bit;
-                    return (i * subregionSize) | location;
-                }
-            }
-            return notFound;
-        }
-
-        // A block is allocated if either it is fully allocated or contains suballocations.
-        BitField allocated = m_full | m_hasSuballocation;
-
-        size_t alignment = sizeClass.blockAlignment();
-        size_t count = sizeClass.blockCount();
-        // Use this mask to check for spans of free blocks.
-        BitField mask = ((1ull << count) - 1) << (alignment - count);
-
-        // Step in units of alignment size.
-        for (unsigned i = 0; i < entries; i += alignment) {
-            if (!(allocated & mask)) {
-                m_full |= mask;
-                return (i + (alignment - count)) << log2SubregionSize;
+    
+    virtual void notifyPageIsFree(void* page) override
+    {
+#if USE(MADV_FREE_FOR_JIT_MEMORY)
+        for (;;) {
+            int result = madvise(page, pageSize(), MADV_FREE);
+            if (!result)
+                return;
+            ASSERT(result == -1);
+            if (errno != EAGAIN) {
+                RELEASE_ASSERT_NOT_REACHED(); // In debug mode, this should be a hard failure.
+                break; // In release mode, we should just ignore the error - not returning memory to the OS is better than crashing, especially since we _will_ be able to reuse the memory internally anyway.
             }
-            mask <<= alignment;
-        }
-        return notFound;
-    }
-
-    void free(size_t location, AllocationTableSizeClass& sizeClass)
-    {
-        ASSERT(sizeClass.blockSize() <= subregionSize);
-
-        size_t entry = location >> log2SubregionSize;
-
-        if (sizeClass.blockSize() < subregionSize) {
-            BitField bit = 1ull << entry;
-            m_suballocations[entry].free(location & (subregionSize - 1), sizeClass);
-            // Check if the suballocation is now empty.
-            if (m_suballocations[entry].isEmpty())
-                m_hasSuballocation &= ~bit;
-            // No need to check, it clearly isn't full any more!
-            m_full &= ~bit;
-        } else {
-            size_t count = sizeClass.blockCount();
-            BitField mask = ((1ull << count) - 1) << entry;
-            ASSERT((m_full & mask) == mask);
-            ASSERT(!(m_hasSuballocation & mask));
-            m_full &= ~mask;
-        }
-    }
-
-    bool isEmpty()
-    {
-        return !(m_full | m_hasSuballocation);
-    }
-
-    bool isFull()
-    {   
-        return !~m_full;
-    }
-
-    static size_t size()
-    {
-        return regionSize;
-    }
-
-    static AllocationTableSizeClass classForSize(size_t size)
-    {
-        if (size < subregionSize) {
-            AllocationTableSizeClass sizeClass = NextLevel::classForSize(size);
-            if (sizeClass.size() < NextLevel::size())
-                return sizeClass;
-        }
-        return AllocationTableSizeClass(size, subregionSize, log2SubregionSize);
-    }
-
-#ifndef NDEBUG
-    void dump(size_t parentOffset = 0, unsigned indent = 0)
-    {
-        for (unsigned i = 0; i < indent; ++i)
-            fprintf(stderr, "    ");
-        fprintf(stderr, "%08x: [", (int)parentOffset);
-        for (unsigned i = 0; i < entries; ++i) {
-            BitField bit = 1ull << i;
-            char c = m_hasSuballocation & bit
-                ? (m_full & bit ? 'N' : 'n')
-                : (m_full & bit ? 'F' : '-');
-            fprintf(stderr, "%c", c);
-        }
-        fprintf(stderr, "]\n");
-
-        for (unsigned i = 0; i < entries; ++i) {
-            BitField bit = 1ull << i;
-            size_t offset = parentOffset | (subregionSize * i);
-            if (m_hasSuballocation & bit)
-                m_suballocations[i].dump(offset, indent + 1);
         }
-    }
-#endif
-
-private:
-    NextLevel m_suballocations[entries];
-    // Subregions exist in one of four states:
-    // (1) empty (both bits clear)
-    // (2) fully allocated as a single allocation (m_full set)
-    // (3) partially allocated through suballocations (m_hasSuballocation set)
-    // (4) fully allocated through suballocations (both bits set)
-    BitField m_full;
-    BitField m_hasSuballocation;
-};
-
-
-typedef AllocationTableLeaf<6> PageTables256KB;
-typedef AllocationTableDirectory<PageTables256KB, 6> PageTables16MB;
-typedef AllocationTableDirectory<LazyAllocationTable<PageTables16MB>, 1> PageTables32MB;
-typedef AllocationTableDirectory<LazyAllocationTable<PageTables16MB>, 6> PageTables1GB;
-
-#if CPU(ARM)
-typedef PageTables16MB FixedVMPoolPageTables;
-#elif CPU(X86_64)
-typedef PageTables1GB FixedVMPoolPageTables;
 #else
-typedef PageTables32MB FixedVMPoolPageTables;
-#endif
-
-
-class FixedVMPoolAllocator
-{
-public:
-    FixedVMPoolAllocator()
-    {
-        ASSERT(PageTables256KB::size() == 256 * 1024);
-        ASSERT(PageTables16MB::size() == 16 * 1024 * 1024);
-        ASSERT(PageTables32MB::size() == 32 * 1024 * 1024);
-        ASSERT(PageTables1GB::size() == 1024 * 1024 * 1024);
-
-        m_reservation = PageReservation::reserveWithGuardPages(FixedVMPoolPageTables::size(), OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
-#if !ENABLE(INTERPRETER)
-        if (!isValid())
-            CRASH();
+        m_reservation.decommit(page, pageSize());
 #endif
     }
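
// Reviewer note: the two strategies pair up as follows. On the MADV_FREE
// path, pages are never decommitted: notifyNeedPage() is a no-op and
// notifyPageIsFree() only advises the kernel that the contents are
// disposable. Because the mapping stays intact, a failed madvise() just
// keeps the physical page around and the pool can still reuse the range,
// which is why release builds break out of the retry loop instead of
// crashing. On the decommit path, the reservation is committed and
// decommitted page-by-page as the MetaAllocator's usage grows and shrinks.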
-    ExecutablePool::Allocation alloc(size_t requestedSize)
-    {
-        ASSERT(requestedSize);
-        AllocationTableSizeClass sizeClass = classForSize(requestedSize);
-        size_t size = sizeClass.size();
-        ASSERT(size);
-
-        if (size >= FixedVMPoolPageTables::size())
-            return ExecutablePool::Allocation(0, 0);
-        if (m_pages.isFull())
-            return ExecutablePool::Allocation(0, 0);
-
-        size_t offset = m_pages.allocate(sizeClass);
-        if (offset == notFound)
-            return ExecutablePool::Allocation(0, 0);
-
-        void* pointer = offsetToPointer(offset);
-        m_reservation.commit(pointer, size);
-        return ExecutablePool::Allocation(pointer, size);
-    }
-
-    void free(ExecutablePool::Allocation allocation)
-    {
-        void* pointer = allocation.base();
-        size_t size = allocation.size();
-        ASSERT(size);
-
-        m_reservation.decommit(pointer, size);
-
-        AllocationTableSizeClass sizeClass = classForSize(size);
-        ASSERT(sizeClass.size() == size);
-        m_pages.free(pointerToOffset(pointer), sizeClass);
-    }
-
-    size_t allocated()
-    {
-        return m_reservation.committed();
-    }
-
-    bool isValid() const
-    {
-        return !!m_reservation;
-    }
 
 private:
-    AllocationTableSizeClass classForSize(size_t size)
-    {
-        return FixedVMPoolPageTables::classForSize(size);
-    }
-
-    void* offsetToPointer(size_t offset)
-    {
-        return reinterpret_cast<void*>(reinterpret_cast<intptr_t>(m_reservation.base()) + offset);
-    }
-
-    size_t pointerToOffset(void* pointer)
-    {
-        return reinterpret_cast<intptr_t>(pointer) - reinterpret_cast<intptr_t>(m_reservation.base());
-    }
-
     PageReservation m_reservation;
-    FixedVMPoolPageTables m_pages;
 };
 
+static FixedVMPoolExecutableAllocator* allocator;
 
-static SpinLock spinlock = SPINLOCK_INITIALIZER;
-static FixedVMPoolAllocator* allocator = 0;
+void ExecutableAllocator::initializeAllocator()
+{
+    ASSERT(!allocator);
+    allocator = new FixedVMPoolExecutableAllocator();
+    CodeProfiling::notifyAllocator(allocator);
+}
 
+ExecutableAllocator::ExecutableAllocator(VM&)
+{
+    ASSERT(allocator);
+}
 
-size_t ExecutableAllocator::committedByteCount()
+ExecutableAllocator::~ExecutableAllocator()
 {
-    SpinLockHolder lockHolder(&spinlock);
-    return allocator ? allocator->allocated() : 0;
-}   
+}
 
-void ExecutableAllocator::intializePageSize()
+FixedVMPoolExecutableAllocator::~FixedVMPoolExecutableAllocator()
 {
-    ExecutableAllocator::pageSize = getpagesize();
+    m_reservation.deallocate();
 }
 
 bool ExecutableAllocator::isValid() const
 {
-    SpinLockHolder lock_holder(&spinlock);
-    if (!allocator)
-        allocator = new FixedVMPoolAllocator();
-    return allocator->isValid();
+    return !!allocator->bytesReserved();
 }
 
 bool ExecutableAllocator::underMemoryPressure()
 {
-    // Technically we should take the spin lock here, but we don't care if we get stale data.
-    // This is only really a heuristic anyway.
-    return allocator && (allocator->allocated() > (FixedVMPoolPageTables::size() / 2));
+    MetaAllocator::Statistics statistics = allocator->currentStatistics();
+    return statistics.bytesAllocated > statistics.bytesReserved / 2;
 }
 
-ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t size)
+double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage)
 {
-    SpinLockHolder lock_holder(&spinlock);
-    ASSERT(allocator);
-    return allocator->alloc(size);
+    MetaAllocator::Statistics statistics = allocator->currentStatistics();
+    ASSERT(statistics.bytesAllocated <= statistics.bytesReserved);
+    size_t bytesAllocated = statistics.bytesAllocated + addedMemoryUsage;
+    if (bytesAllocated >= statistics.bytesReserved)
+        bytesAllocated = statistics.bytesReserved;
+    double result = 1.0;
+    size_t divisor = statistics.bytesReserved - bytesAllocated;
+    if (divisor)
+        result = static_cast<double>(statistics.bytesReserved) / divisor;
+    if (result < 1.0)
+        result = 1.0;
+    return result;
 }
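
// Reviewer note: the multiplier works out to
//     reserved / (reserved - allocated)
// clamped to at least 1.0. For a hypothetical 16 MB pool:
//     4 MB allocated  -> 16 / 12 ~= 1.33
//     12 MB allocated -> 16 / 4   = 4.0
//     15 MB allocated -> 16 / 1   = 16.0
// so the penalty applied to prospective compilations grows sharply as the
// pool fills.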
 
-void ExecutablePool::systemRelease(ExecutablePool::Allocation& allocation) 
+PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(VM& vm, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
 {
-    SpinLockHolder lock_holder(&spinlock);
-    ASSERT(allocator);
-    allocator->free(allocation);
+    RefPtr<ExecutableMemoryHandle> result = allocator->allocate(sizeInBytes, ownerUID);
+    if (!result) {
+        if (effort == JITCompilationCanFail)
+            return result;
+        releaseExecutableMemory(vm);
+        result = allocator->allocate(sizeInBytes, ownerUID);
+        RELEASE_ASSERT(result);
+    }
+    return result.release();
 }
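
// Reviewer sketch (hypothetical caller, not patch code): the two
// JITCompilationEffort modes as seen from a call site. With
// JITCompilationCanFail the caller receives a null handle and must fall
// back itself; with JITCompilationMustSucceed a failed allocation first
// triggers releaseExecutableMemory(vm) and, if the retry also fails,
// crashes via RELEASE_ASSERT.

static RefPtr<ExecutableMemoryHandle> tryAllocateCode(VM& vm, ExecutableAllocator& allocator, size_t bytes, void* ownerUID)
{
    RefPtr<ExecutableMemoryHandle> handle = allocator.allocate(vm, bytes, ownerUID, JITCompilationCanFail);
    if (!handle) {
        // Pool exhausted: degrade gracefully (e.g. stay in a lower tier)
        // rather than demanding memory and risking a crash.
        return nullptr;
    }
    return handle;
}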
 
+size_t ExecutableAllocator::committedByteCount()
+{
+    return allocator->bytesCommitted();
 }
 
-
-#endif // HAVE(ASSEMBLER)
-
-#if !ENABLE(JIT)
-// FIXME: Needed to satisfy JavaScriptCore.exp requirements when building only the interpreter.
-namespace JSC {
-size_t ExecutableAllocator::committedByteCount()
+#if ENABLE(META_ALLOCATOR_PROFILE)
+void ExecutableAllocator::dumpProfile()
 {
-    return 0;
+    allocator->dumpProfile();
+}
+#endif
+
 }
-} // namespace JSC
-#endif // !ENABLE(JIT)
+
+
+#endif // ENABLE(EXECUTABLE_ALLOCATOR_FIXED)