X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/blobdiff_plain/14957cd040308e3eeec43d26bae5d76da13fcd85..1981f5dfe8d77d97469d20652f712a09400c48ed:/jit/ExecutableAllocatorFixedVMPool.cpp diff --git a/jit/ExecutableAllocatorFixedVMPool.cpp b/jit/ExecutableAllocatorFixedVMPool.cpp index 792e931..65a3a73 100644 --- a/jit/ExecutableAllocatorFixedVMPool.cpp +++ b/jit/ExecutableAllocatorFixedVMPool.cpp @@ -29,12 +29,11 @@ #if ENABLE(EXECUTABLE_ALLOCATOR_FIXED) +#include "CodeProfiling.h" #include <errno.h> - -#include "TCSpinLock.h" #include <sys/mman.h> #include <unistd.h> -#include <wtf/AVLTree.h> +#include <wtf/MetaAllocator.h> #include <wtf/PageReservation.h> #include <wtf/VMTags.h> @@ -48,491 +47,145 @@ using namespace WTF; namespace JSC { -#define TwoPow(n) (1ull << n) - -class AllocationTableSizeClass { -public: - AllocationTableSizeClass(size_t size, size_t blockSize, unsigned log2BlockSize) - : m_blockSize(blockSize) - { - ASSERT(blockSize == TwoPow(log2BlockSize)); - - // Calculate the number of blocks needed to hold size. - size_t blockMask = blockSize - 1; - m_blockCount = (size + blockMask) >> log2BlockSize; - - // Align to the smallest power of two >= m_blockCount. - m_blockAlignment = 1; - while (m_blockAlignment < m_blockCount) - m_blockAlignment += m_blockAlignment; - } - - size_t blockSize() const { return m_blockSize; } - size_t blockCount() const { return m_blockCount; } - size_t blockAlignment() const { return m_blockAlignment; } - - size_t size() - { - return m_blockSize * m_blockCount; - } - -private: - size_t m_blockSize; - size_t m_blockCount; - size_t m_blockAlignment; -}; - -template<unsigned log2Entries> -class AllocationTableLeaf { - typedef uint64_t BitField; - -public: - static const unsigned log2SubregionSize = 12; // 2^12 == pagesize - static const unsigned log2RegionSize = log2SubregionSize + log2Entries; - - static const size_t subregionSize = TwoPow(log2SubregionSize); - static const size_t regionSize = TwoPow(log2RegionSize); - static const unsigned entries = TwoPow(log2Entries); - COMPILE_ASSERT(entries <= (sizeof(BitField) * 8), AllocationTableLeaf_entries_fit_in_BitField); - - AllocationTableLeaf() - : m_allocated(0) - { - } - - ~AllocationTableLeaf() - { - ASSERT(isEmpty()); - } - - size_t allocate(AllocationTableSizeClass& sizeClass) - { - ASSERT(sizeClass.blockSize() == subregionSize); - ASSERT(!isFull()); - - size_t alignment = sizeClass.blockAlignment(); - size_t count = sizeClass.blockCount(); - // Use this mask to check for spans of free blocks. - BitField mask = ((1ull << count) - 1) << (alignment - count); - - // Step in units of alignment size. 
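        // Illustration with assumed numbers (not taken from the original source): a request for
        // three 4KB pages gives blockCount == 3 and rounds blockAlignment up to 4, so
        //     mask == ((1ull << 3) - 1) << (4 - 3) == 0b1110.
        // The loop below slides that window in steps of 4 entries and, at the first window whose
        // bits are all clear, marks pages i + 1 .. i + 3 as allocated and returns
        // (i + 1) << log2SubregionSize as the byte offset.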
- for (unsigned i = 0; i < entries; i += alignment) { - if (!(m_allocated & mask)) { - m_allocated |= mask; - return (i + (alignment - count)) << log2SubregionSize; - } - mask <<= alignment; - } - return notFound; - } - - void free(size_t location, AllocationTableSizeClass& sizeClass) - { - ASSERT(sizeClass.blockSize() == subregionSize); - - size_t entry = location >> log2SubregionSize; - size_t count = sizeClass.blockCount(); - BitField mask = ((1ull << count) - 1) << entry; - - ASSERT((m_allocated & mask) == mask); - m_allocated &= ~mask; - } - - bool isEmpty() - { - return !m_allocated; - } - - bool isFull() - { - return !~m_allocated; - } - - static size_t size() - { - return regionSize; - } - - static AllocationTableSizeClass classForSize(size_t size) - { - return AllocationTableSizeClass(size, subregionSize, log2SubregionSize); - } - -#ifndef NDEBUG - void dump(size_t parentOffset = 0, unsigned indent = 0) - { - for (unsigned i = 0; i < indent; ++i) - fprintf(stderr, " "); - fprintf(stderr, "%08x: [%016llx]\n", (int)parentOffset, m_allocated); - } +#if CPU(ARM) +static const size_t fixedPoolSize = 16 * 1024 * 1024; +#elif CPU(X86_64) +static const size_t fixedPoolSize = 1024 * 1024 * 1024; +#else +static const size_t fixedPoolSize = 32 * 1024 * 1024; #endif -private: - BitField m_allocated; -}; - - -template<class NextLevel> -class LazyAllocationTable { +class FixedVMPoolExecutableAllocator : public MetaAllocator { public: - static const unsigned log2RegionSize = NextLevel::log2RegionSize; - static const unsigned entries = NextLevel::entries; - - LazyAllocationTable() - : m_ptr(0) - { - } - - ~LazyAllocationTable() - { - ASSERT(isEmpty()); - } - - size_t allocate(AllocationTableSizeClass& sizeClass) - { - if (!m_ptr) - m_ptr = new NextLevel(); - return m_ptr->allocate(sizeClass); - } - - void free(size_t location, AllocationTableSizeClass& sizeClass) + FixedVMPoolExecutableAllocator() + : MetaAllocator(32) // round up all allocations to 32 bytes { - ASSERT(m_ptr); - m_ptr->free(location, sizeClass); - if (m_ptr->isEmpty()) { - delete m_ptr; - m_ptr = 0; + m_reservation = PageReservation::reserveWithGuardPages(fixedPoolSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true); +#if !(ENABLE(CLASSIC_INTERPRETER) || ENABLE(LLINT)) + if (!m_reservation) + CRASH(); +#endif + if (m_reservation) { + ASSERT(m_reservation.size() == fixedPoolSize); + addFreshFreeSpace(m_reservation.base(), m_reservation.size()); } } - - bool isEmpty() - { - return !m_ptr; - } - - bool isFull() - { - return m_ptr && m_ptr->isFull(); - } - - static size_t size() + +protected: + virtual void* allocateNewSpace(size_t&) { - return NextLevel::size(); + // We're operating in a fixed pool, so new allocation is always prohibited. 
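        // Returning 0 tells the MetaAllocator that no extra backing space can be obtained,
        // so a request that does not fit in the remaining pool simply fails.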
+ return 0; } - -#ifndef NDEBUG - void dump(size_t parentOffset = 0, unsigned indent = 0) + + virtual void notifyNeedPage(void* page) { - ASSERT(m_ptr); - m_ptr->dump(parentOffset, indent); - } +#if OS(DARWIN) + UNUSED_PARAM(page); +#else + m_reservation.commit(page, pageSize()); #endif - - static AllocationTableSizeClass classForSize(size_t size) - { - return NextLevel::classForSize(size); } - -private: - NextLevel* m_ptr; -}; - -template<class NextLevel, unsigned log2Entries> -class AllocationTableDirectory { - typedef uint64_t BitField; - -public: - static const unsigned log2SubregionSize = NextLevel::log2RegionSize; - static const unsigned log2RegionSize = log2SubregionSize + log2Entries; - - static const size_t subregionSize = TwoPow(log2SubregionSize); - static const size_t regionSize = TwoPow(log2RegionSize); - static const unsigned entries = TwoPow(log2Entries); - COMPILE_ASSERT(entries <= (sizeof(BitField) * 8), AllocationTableDirectory_entries_fit_in_BitField); - - AllocationTableDirectory() - : m_full(0) - , m_hasSuballocation(0) - { - } - - ~AllocationTableDirectory() - { - ASSERT(isEmpty()); - } - - size_t allocate(AllocationTableSizeClass& sizeClass) - { - ASSERT(sizeClass.blockSize() <= subregionSize); - ASSERT(!isFull()); - - if (sizeClass.blockSize() < subregionSize) { - BitField bit = 1; - for (unsigned i = 0; i < entries; ++i, bit += bit) { - if (m_full & bit) - continue; - size_t location = m_suballocations[i].allocate(sizeClass); - if (location != notFound) { - // If this didn't already have a subregion, it does now! - m_hasSuballocation |= bit; - // Mirror the suballocation's full bit. - if (m_suballocations[i].isFull()) - m_full |= bit; - return (i * subregionSize) | location; - } - } - return notFound; - } - - // A block is allocated if either it is fully allocated or contains suballocations. - BitField allocated = m_full | m_hasSuballocation; - - size_t alignment = sizeClass.blockAlignment(); - size_t count = sizeClass.blockCount(); - // Use this mask to check for spans of free blocks. - BitField mask = ((1ull << count) - 1) << (alignment - count); - - // Step in units of alignment size. - for (unsigned i = 0; i < entries; i += alignment) { - if (!(allocated & mask)) { - m_full |= mask; - return (i + (alignment - count)) << log2SubregionSize; + + virtual void notifyPageIsFree(void* page) + { +#if OS(DARWIN) + for (;;) { + int result = madvise(page, pageSize(), MADV_FREE); + if (!result) + return; + ASSERT(result == -1); + if (errno != EAGAIN) { + ASSERT_NOT_REACHED(); // In debug mode, this should be a hard failure. + break; // In release mode, we should just ignore the error - not returning memory to the OS is better than crashing, especially since we _will_ be able to reuse the memory internally anyway. } - mask <<= alignment; - } - return notFound; - } - - void free(size_t location, AllocationTableSizeClass& sizeClass) - { - ASSERT(sizeClass.blockSize() <= subregionSize); - - size_t entry = location >> log2SubregionSize; - - if (sizeClass.blockSize() < subregionSize) { - BitField bit = 1ull << entry; - m_suballocations[entry].free(location & (subregionSize - 1), sizeClass); - // Check if the suballocation is now empty. - if (m_suballocations[entry].isEmpty()) - m_hasSuballocation &= ~bit; - // No need to check, it clearly isn't full any more! 
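            // Per the state encoding documented on m_full / m_hasSuballocation below, clearing the
            // full bit leaves this entry either empty or partially allocated through suballocations,
            // depending on whether its m_hasSuballocation bit is still set.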
- m_full &= ~bit; - } else { - size_t count = sizeClass.blockCount(); - BitField mask = ((1ull << count) - 1) << entry; - ASSERT((m_full & mask) == mask); - ASSERT(!(m_hasSuballocation & mask)); - m_full &= ~mask; - } - } - - bool isEmpty() - { - return !(m_full | m_hasSuballocation); - } - - bool isFull() - { - return !~m_full; - } - - static size_t size() - { - return regionSize; - } - - static AllocationTableSizeClass classForSize(size_t size) - { - if (size < subregionSize) { - AllocationTableSizeClass sizeClass = NextLevel::classForSize(size); - if (sizeClass.size() < NextLevel::size()) - return sizeClass; - } - return AllocationTableSizeClass(size, subregionSize, log2SubregionSize); - } - -#ifndef NDEBUG - void dump(size_t parentOffset = 0, unsigned indent = 0) - { - for (unsigned i = 0; i < indent; ++i) - fprintf(stderr, " "); - fprintf(stderr, "%08x: [", (int)parentOffset); - for (unsigned i = 0; i < entries; ++i) { - BitField bit = 1ull << i; - char c = m_hasSuballocation & bit - ? (m_full & bit ? 'N' : 'n') - : (m_full & bit ? 'F' : '-'); - fprintf(stderr, "%c", c); - } - fprintf(stderr, "]\n"); - - for (unsigned i = 0; i < entries; ++i) { - BitField bit = 1ull << i; - size_t offset = parentOffset | (subregionSize * i); - if (m_hasSuballocation & bit) - m_suballocations[i].dump(offset, indent + 1); } - } -#endif - -private: - NextLevel m_suballocations[entries]; - // Subregions exist in one of four states: - // (1) empty (both bits clear) - // (2) fully allocated as a single allocation (m_full set) - // (3) partially allocated through suballocations (m_hasSuballocation set) - // (4) fully allocated through suballocations (both bits set) - BitField m_full; - BitField m_hasSuballocation; -}; - - -typedef AllocationTableLeaf<6> PageTables256KB; -typedef AllocationTableDirectory<PageTables256KB, 6> PageTables16MB; -typedef AllocationTableDirectory<LazyAllocationTable<PageTables16MB>, 1> PageTables32MB; -typedef AllocationTableDirectory<LazyAllocationTable<PageTables16MB>, 6> PageTables1GB; - -#if CPU(ARM) -typedef PageTables16MB FixedVMPoolPageTables; -#elif CPU(X86_64) -typedef PageTables1GB FixedVMPoolPageTables; #else -typedef PageTables32MB FixedVMPoolPageTables; + m_reservation.decommit(page, pageSize()); #endif - - -class FixedVMPoolAllocator -{ -public: - FixedVMPoolAllocator() - { - ASSERT(PageTables256KB::size() == 256 * 1024); - ASSERT(PageTables16MB::size() == 16 * 1024 * 1024); - ASSERT(PageTables32MB::size() == 32 * 1024 * 1024); - ASSERT(PageTables1GB::size() == 1024 * 1024 * 1024); - - m_reservation = PageReservation::reserveWithGuardPages(FixedVMPoolPageTables::size(), OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true); -#if !ENABLE(INTERPRETER) - if (!isValid()) - CRASH(); -#endif - } - - ExecutablePool::Allocation alloc(size_t requestedSize) - { - ASSERT(requestedSize); - AllocationTableSizeClass sizeClass = classForSize(requestedSize); - size_t size = sizeClass.size(); - ASSERT(size); - - if (size >= FixedVMPoolPageTables::size()) - return ExecutablePool::Allocation(0, 0); - if (m_pages.isFull()) - return ExecutablePool::Allocation(0, 0); - - size_t offset = m_pages.allocate(sizeClass); - if (offset == notFound) - return ExecutablePool::Allocation(0, 0); - - void* pointer = offsetToPointer(offset); - m_reservation.commit(pointer, size); - return ExecutablePool::Allocation(pointer, size); - } - - void free(ExecutablePool::Allocation allocation) - { - void* pointer = allocation.base(); - size_t size = allocation.size(); - ASSERT(size); - - 
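        // Decommitting releases the physical pages while keeping the address range reserved,
        // so m_reservation.committed() (reported by allocated() below) drops accordingly.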
m_reservation.decommit(pointer, size); - - AllocationTableSizeClass sizeClass = classForSize(size); - ASSERT(sizeClass.size() == size); - m_pages.free(pointerToOffset(pointer), sizeClass); - } - - size_t allocated() - { - return m_reservation.committed(); - } - - bool isValid() const - { - return !!m_reservation; } private: - AllocationTableSizeClass classForSize(size_t size) - { - return FixedVMPoolPageTables::classForSize(size); - } - - void* offsetToPointer(size_t offset) - { - return reinterpret_cast<void*>(reinterpret_cast<intptr_t>(m_reservation.base()) + offset); - } - - size_t pointerToOffset(void* pointer) - { - return reinterpret_cast<intptr_t>(pointer) - reinterpret_cast<intptr_t>(m_reservation.base()); - } - PageReservation m_reservation; - FixedVMPoolPageTables m_pages; }; +static FixedVMPoolExecutableAllocator* allocator; -static SpinLock spinlock = SPINLOCK_INITIALIZER; -static FixedVMPoolAllocator* allocator = 0; - +void ExecutableAllocator::initializeAllocator() +{ + ASSERT(!allocator); + allocator = new FixedVMPoolExecutableAllocator(); + CodeProfiling::notifyAllocator(allocator); +} -size_t ExecutableAllocator::committedByteCount() +ExecutableAllocator::ExecutableAllocator(JSGlobalData&) { - SpinLockHolder lockHolder(&spinlock); - return allocator ? allocator->allocated() : 0; -} + ASSERT(allocator); +} -void ExecutableAllocator::intializePageSize() +ExecutableAllocator::~ExecutableAllocator() { - ExecutableAllocator::pageSize = getpagesize(); } bool ExecutableAllocator::isValid() const { - SpinLockHolder lock_holder(&spinlock); - if (!allocator) - allocator = new FixedVMPoolAllocator(); - return allocator->isValid(); + return !!allocator->bytesReserved(); } bool ExecutableAllocator::underMemoryPressure() { - // Technically we should take the spin lock here, but we don't care if we get stale data. - // This is only really a heuristic anyway. 
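    // The threshold is half of the pool either way: the removed code compares committed bytes
    // against FixedVMPoolPageTables::size() / 2, while the replacement compares
    // bytesAllocated against bytesReserved / 2 from the MetaAllocator statistics.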
- return allocator && (allocator->allocated() > (FixedVMPoolPageTables::size() / 2)); + MetaAllocator::Statistics statistics = allocator->currentStatistics(); + return statistics.bytesAllocated > statistics.bytesReserved / 2; } -ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t size) +double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage) { - SpinLockHolder lock_holder(&spinlock); - ASSERT(allocator); - return allocator->alloc(size); + MetaAllocator::Statistics statistics = allocator->currentStatistics(); + ASSERT(statistics.bytesAllocated <= statistics.bytesReserved); + size_t bytesAllocated = statistics.bytesAllocated + addedMemoryUsage; + if (bytesAllocated >= statistics.bytesReserved) + bytesAllocated = statistics.bytesReserved; + double result = 1.0; + size_t divisor = statistics.bytesReserved - bytesAllocated; + if (divisor) + result = static_cast<double>(statistics.bytesReserved) / divisor; + if (result < 1.0) + result = 1.0; + return result; } -void ExecutablePool::systemRelease(ExecutablePool::Allocation& allocation) +PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(JSGlobalData& globalData, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort) { - SpinLockHolder lock_holder(&spinlock); - ASSERT(allocator); - allocator->free(allocation); + RefPtr<ExecutableMemoryHandle> result = allocator->allocate(sizeInBytes, ownerUID); + if (!result) { + if (effort == JITCompilationCanFail) + return result; + releaseExecutableMemory(globalData); + result = allocator->allocate(sizeInBytes, ownerUID); + if (!result) + CRASH(); + } + return result.release(); } +size_t ExecutableAllocator::committedByteCount() +{ + return allocator->bytesCommitted(); +} + +#if ENABLE(META_ALLOCATOR_PROFILE) +void ExecutableAllocator::dumpProfile() +{ + allocator->dumpProfile(); +} +#endif + } -#endif // HAVE(ASSEMBLER) +#endif // ENABLE(EXECUTABLE_ALLOCATOR_FIXED) -#if !ENABLE(JIT) +#if !ENABLE(ASSEMBLER) // FIXME: Needed to satisfy JavaScriptCore.exp requirements when building only the interpreter. namespace JSC { size_t ExecutableAllocator::committedByteCount()