X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/blobdiff_plain/14957cd040308e3eeec43d26bae5d76da13fcd85..refs/heads/master:/jit/ExecutableAllocator.cpp

diff --git a/jit/ExecutableAllocator.cpp b/jit/ExecutableAllocator.cpp
index 35531d9..bb49e73 100644
--- a/jit/ExecutableAllocator.cpp
+++ b/jit/ExecutableAllocator.cpp
@@ -24,71 +24,225 @@
  */
 
 #include "config.h"
-
 #include "ExecutableAllocator.h"
 
+#include "JSCInlines.h"
+
+#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
+#include "CodeProfiling.h"
+#include <wtf/HashSet.h>
+#include <wtf/MetaAllocator.h>
+#include <wtf/NeverDestroyed.h>
+#include <wtf/PageReservation.h>
+#include <wtf/VMTags.h>
+#endif
+
+// Uncomment to create an artificial executable memory usage limit. This limit
+// is imperfect and is primarily useful for testing the VM's ability to handle
+// out-of-executable-memory situations.
+// #define EXECUTABLE_MEMORY_LIMIT 1000000
+
 #if ENABLE(ASSEMBLER)
 
-namespace JSC {
+using namespace WTF;
 
-size_t ExecutableAllocator::pageSize = 0;
+namespace JSC {
 
 #if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
 
-void ExecutableAllocator::intializePageSize()
+class DemandExecutableAllocator : public MetaAllocator {
+public:
+    DemandExecutableAllocator()
+        : MetaAllocator(jitAllocationGranule)
+    {
+        std::lock_guard<std::mutex> lock(allocatorsMutex());
+        allocators().add(this);
+        // Don't preallocate any memory here.
+    }
+
+    virtual ~DemandExecutableAllocator()
+    {
+        {
+            std::lock_guard<std::mutex> lock(allocatorsMutex());
+            allocators().remove(this);
+        }
+        for (unsigned i = 0; i < reservations.size(); ++i)
+            reservations.at(i).deallocate();
+    }
+
+    static size_t bytesAllocatedByAllAllocators()
+    {
+        size_t total = 0;
+        std::lock_guard<std::mutex> lock(allocatorsMutex());
+        for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
+            total += (*allocator)->bytesAllocated();
+        return total;
+    }
+
+    static size_t bytesCommittedByAllocactors()
+    {
+        size_t total = 0;
+        std::lock_guard<std::mutex> lock(allocatorsMutex());
+        for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
+            total += (*allocator)->bytesCommitted();
+        return total;
+    }
+
+#if ENABLE(META_ALLOCATOR_PROFILE)
+    static void dumpProfileFromAllAllocators()
+    {
+        std::lock_guard<std::mutex> lock(allocatorsMutex());
+        for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
+            (*allocator)->dumpProfile();
+    }
+#endif
+
+protected:
+    virtual void* allocateNewSpace(size_t& numPages)
+    {
+        size_t newNumPages = (((numPages * pageSize() + JIT_ALLOCATOR_LARGE_ALLOC_SIZE - 1) / JIT_ALLOCATOR_LARGE_ALLOC_SIZE * JIT_ALLOCATOR_LARGE_ALLOC_SIZE) + pageSize() - 1) / pageSize();
+
+        ASSERT(newNumPages >= numPages);
+
+        numPages = newNumPages;
+
+#ifdef EXECUTABLE_MEMORY_LIMIT
+        if (bytesAllocatedByAllAllocators() >= EXECUTABLE_MEMORY_LIMIT)
+            return 0;
+#endif
+
+        PageReservation reservation = PageReservation::reserve(numPages * pageSize(), OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
+        RELEASE_ASSERT(reservation);
+
+        reservations.append(reservation);
+
+        return reservation.base();
+    }
+
+    virtual void notifyNeedPage(void* page)
+    {
+        OSAllocator::commit(page, pageSize(), EXECUTABLE_POOL_WRITABLE, true);
+    }
+
+    virtual void notifyPageIsFree(void* page)
+    {
+        OSAllocator::decommit(page, pageSize());
+    }
+
+private:
+    Vector<PageReservation, 16> reservations;
+    static HashSet<DemandExecutableAllocator*>& allocators()
+    {
+        DEPRECATED_DEFINE_STATIC_LOCAL(HashSet<DemandExecutableAllocator*>, sAllocators, ());
+        return sAllocators;
+    }
+
+    static std::mutex& allocatorsMutex()
+    {
+        static NeverDestroyed<std::mutex> mutex;
+
+        return mutex;
+    }
+};
+
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+void ExecutableAllocator::initializeAllocator()
 {
-#if OS(SYMBIAN) && CPU(ARMV5_OR_LOWER)
-    // The moving memory model (as used in ARMv5 and earlier platforms)
-    // on Symbian OS limits the number of chunks for each process to 16.
-    // To mitigate this limitation increase the pagesize to allocate
-    // fewer, larger chunks. Set the page size to 256 Kb to compensate
-    // for moving memory model limitation
-    ExecutableAllocator::pageSize = 256 * 1024;
+}
 #else
-    ExecutableAllocator::pageSize = WTF::pageSize();
-#endif
+static DemandExecutableAllocator* gAllocator;
+
+namespace {
+static inline DemandExecutableAllocator* allocator()
+{
+    return gAllocator;
+}
+}
+
+void ExecutableAllocator::initializeAllocator()
+{
+    ASSERT(!gAllocator);
+    gAllocator = new DemandExecutableAllocator();
+    CodeProfiling::notifyAllocator(gAllocator);
 }
+#endif
 
-ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t size)
+ExecutableAllocator::ExecutableAllocator(VM&)
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+    : m_allocator(std::make_unique<DemandExecutableAllocator>())
+#endif
 {
-    PageAllocation allocation = PageAllocation::allocate(size, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
-    if (!allocation)
-        CRASH();
-    return allocation;
+    ASSERT(allocator());
 }
 
-void ExecutablePool::systemRelease(ExecutablePool::Allocation& allocation)
+ExecutableAllocator::~ExecutableAllocator()
 {
-    allocation.deallocate();
 }
 
 bool ExecutableAllocator::isValid() const
 {
     return true;
 }
- 
+
 bool ExecutableAllocator::underMemoryPressure()
 {
+#ifdef EXECUTABLE_MEMORY_LIMIT
+    return DemandExecutableAllocator::bytesAllocatedByAllAllocators() > EXECUTABLE_MEMORY_LIMIT / 2;
+#else
     return false;
+#endif
 }
- 
+
+double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage)
+{
+    double result;
+#ifdef EXECUTABLE_MEMORY_LIMIT
+    size_t bytesAllocated = DemandExecutableAllocator::bytesAllocatedByAllAllocators() + addedMemoryUsage;
+    if (bytesAllocated >= EXECUTABLE_MEMORY_LIMIT)
+        bytesAllocated = EXECUTABLE_MEMORY_LIMIT;
+    result = static_cast<double>(EXECUTABLE_MEMORY_LIMIT) /
+        (EXECUTABLE_MEMORY_LIMIT - bytesAllocated);
+#else
+    UNUSED_PARAM(addedMemoryUsage);
+    result = 1.0;
+#endif
+    if (result < 1.0)
+        result = 1.0;
+    return result;
+
+}
+
+RefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
+{
+    RefPtr<ExecutableMemoryHandle> result = allocator()->allocate(sizeInBytes, ownerUID);
+    RELEASE_ASSERT(result || effort != JITCompilationMustSucceed);
+    return result;
+}
+
 size_t ExecutableAllocator::committedByteCount()
 {
-    return 0;
-}
+    return DemandExecutableAllocator::bytesCommittedByAllocactors();
+}
 
+#if ENABLE(META_ALLOCATOR_PROFILE)
+void ExecutableAllocator::dumpProfile()
+{
+    DemandExecutableAllocator::dumpProfileFromAllAllocators();
+}
 #endif
+
+#endif // ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
+
 #if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
 
-#if OS(WINDOWS) || OS(SYMBIAN)
+#if OS(WINDOWS)
 #error "ASSEMBLER_WX_EXCLUSIVE not yet suported on this platform."
 #endif
 
 void ExecutableAllocator::reprotectRegion(void* start, size_t size, ProtectionSetting setting)
 {
-    if (!pageSize)
-        intializePageSize();
+    size_t pageSize = WTF::pageSize();
 
     // Calculate the start of the page containing this region,
     // and account for this extra memory within size.
@@ -106,23 +260,6 @@ void ExecutableAllocator::reprotectRegion(void* start, size_t size, ProtectionSe
 
 #endif
 
-#if CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(RVCT)
-
-__asm void ExecutableAllocator::cacheFlush(void* code, size_t size)
-{
-    ARM
-    push {r7}
-    add r1, r1, r0
-    mov r7, #0xf0000
-    add r7, r7, #0x2
-    mov r2, #0x0
-    svc #0x0
-    pop {r7}
-    bx lr
-}
-
-#endif
-
 }
 
 #endif // HAVE(ASSEMBLER)
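
Note (not part of the patch above): the added ExecutableAllocator::memoryPressureMultiplier() scales its result as EXECUTABLE_MEMORY_LIMIT / (EXECUTABLE_MEMORY_LIMIT - bytesAllocated), clamped to at least 1.0, so the factor grows as allocated executable memory approaches the artificial limit. A minimal standalone sketch of that arithmetic, independent of WTF/JSC; kLimit and pressureMultiplier are hypothetical stand-ins for EXECUTABLE_MEMORY_LIMIT and the member function:

#include <algorithm>
#include <cstdio>

// Hypothetical stand-in for EXECUTABLE_MEMORY_LIMIT from the patch.
static const size_t kLimit = 1000000;

// Mirrors the shape of memoryPressureMultiplier(): the closer usage gets
// to the limit, the larger the returned scaling factor.
static double pressureMultiplier(size_t bytesAllocated)
{
    bytesAllocated = std::min(bytesAllocated, kLimit - 1); // sketch only: avoid division by zero at the limit
    double result = static_cast<double>(kLimit) / (kLimit - bytesAllocated);
    return std::max(result, 1.0);
}

int main()
{
    std::printf("%.2f\n", pressureMultiplier(0));      // 1.00
    std::printf("%.2f\n", pressureMultiplier(500000)); // 2.00
    std::printf("%.2f\n", pressureMultiplier(900000)); // 10.00
    return 0;
}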
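
Similarly, DemandExecutableAllocator::allocateNewSpace() in the patch rounds a page request up to the large-allocation granule and then re-expresses it as a whole number of pages before reserving. A standalone sketch of that rounding, with kPage and kGranule as hypothetical stand-ins for pageSize() and JIT_ALLOCATOR_LARGE_ALLOC_SIZE:

#include <cassert>
#include <cstddef>

static const size_t kPage = 4096;          // stand-in for pageSize()
static const size_t kGranule = 16 * kPage; // stand-in for JIT_ALLOCATOR_LARGE_ALLOC_SIZE

// Round numPages up so the reservation covers a whole number of granules.
static size_t roundUpPages(size_t numPages)
{
    size_t bytes = numPages * kPage;
    size_t roundedBytes = (bytes + kGranule - 1) / kGranule * kGranule; // round up to the granule
    size_t newNumPages = (roundedBytes + kPage - 1) / kPage;            // convert back to pages
    assert(newNumPages >= numPages);
    return newNumPages;
}

int main()
{
    assert(roundUpPages(1) == 16);  // a single page still reserves a full granule
    assert(roundUpPages(16) == 16);
    assert(roundUpPages(17) == 32);
    return 0;
}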