X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/blobdiff_plain/4e4e5a6f2694187498445a6ac6f1634ce8141119..217a6308cd6a1dc049a0bb69263bd4c91f91c4d0:/jit/ExecutableAllocator.cpp

diff --git a/jit/ExecutableAllocator.cpp b/jit/ExecutableAllocator.cpp
index 5e10e86..5ac6cc4 100644
--- a/jit/ExecutableAllocator.cpp
+++ b/jit/ExecutableAllocator.cpp
@@ -27,17 +27,221 @@
 #include "ExecutableAllocator.h"
 
+#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
+#include "CodeProfiling.h"
+#include <wtf/HashSet.h>
+#include <wtf/MetaAllocator.h>
+#include <wtf/PageReservation.h>
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+#include <wtf/PassOwnPtr.h>
+#endif
+#include <wtf/ThreadingPrimitives.h>
+#include <wtf/VMTags.h>
+#endif
+
+// Uncomment to create an artificial executable memory usage limit. This limit
+// is imperfect and is primarily useful for testing the VM's ability to handle
+// out-of-executable-memory situations.
+// #define EXECUTABLE_MEMORY_LIMIT 1000000
+
 #if ENABLE(ASSEMBLER)
 
+using namespace WTF;
+
 namespace JSC {
 
-size_t ExecutableAllocator::pageSize = 0;
+#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
+
+class DemandExecutableAllocator : public MetaAllocator {
+public:
+    DemandExecutableAllocator()
+        : MetaAllocator(jitAllocationGranule)
+    {
+        MutexLocker lock(allocatorsMutex());
+        allocators().add(this);
+        // Don't preallocate any memory here.
+    }
+
+    virtual ~DemandExecutableAllocator()
+    {
+        {
+            MutexLocker lock(allocatorsMutex());
+            allocators().remove(this);
+        }
+        for (unsigned i = 0; i < reservations.size(); ++i)
+            reservations.at(i).deallocate();
+    }
+
+    static size_t bytesAllocatedByAllAllocators()
+    {
+        size_t total = 0;
+        MutexLocker lock(allocatorsMutex());
+        for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
+            total += (*allocator)->bytesAllocated();
+        return total;
+    }
+
+    static size_t bytesCommittedByAllocactors()
+    {
+        size_t total = 0;
+        MutexLocker lock(allocatorsMutex());
+        for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
+            total += (*allocator)->bytesCommitted();
+        return total;
+    }
+
+#if ENABLE(META_ALLOCATOR_PROFILE)
+    static void dumpProfileFromAllAllocators()
+    {
+        MutexLocker lock(allocatorsMutex());
+        for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
+            (*allocator)->dumpProfile();
+    }
+#endif
+
+protected:
+    virtual void* allocateNewSpace(size_t& numPages)
+    {
+        size_t newNumPages = (((numPages * pageSize() + JIT_ALLOCATOR_LARGE_ALLOC_SIZE - 1) / JIT_ALLOCATOR_LARGE_ALLOC_SIZE * JIT_ALLOCATOR_LARGE_ALLOC_SIZE) + pageSize() - 1) / pageSize();
+
+        ASSERT(newNumPages >= numPages);
+
+        numPages = newNumPages;
+
+#ifdef EXECUTABLE_MEMORY_LIMIT
+        if (bytesAllocatedByAllAllocators() >= EXECUTABLE_MEMORY_LIMIT)
+            return 0;
+#endif
+
+        PageReservation reservation = PageReservation::reserve(numPages * pageSize(), OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
+        RELEASE_ASSERT(reservation);
+
+        reservations.append(reservation);
+
+        return reservation.base();
+    }
+
+    virtual void notifyNeedPage(void* page)
+    {
+        OSAllocator::commit(page, pageSize(), EXECUTABLE_POOL_WRITABLE, true);
+    }
+
+    virtual void notifyPageIsFree(void* page)
+    {
+        OSAllocator::decommit(page, pageSize());
+    }
+
+private:
+    Vector<PageReservation, 16> reservations;
+    static HashSet<DemandExecutableAllocator*>& allocators()
+    {
+        DEFINE_STATIC_LOCAL(HashSet<DemandExecutableAllocator*>, sAllocators, ());
+        return sAllocators;
+    }
+    static Mutex& allocatorsMutex()
+    {
+        DEFINE_STATIC_LOCAL(Mutex, mutex, ());
+        return mutex;
+    }
+};
+
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+void ExecutableAllocator::initializeAllocator()
+{
+}
+#else
+static DemandExecutableAllocator* gAllocator;
+
+namespace {
+static inline DemandExecutableAllocator* allocator()
+{
+    return gAllocator;
+}
+}
+
+void ExecutableAllocator::initializeAllocator()
+{
+    ASSERT(!gAllocator);
+    gAllocator = new DemandExecutableAllocator();
+    CodeProfiling::notifyAllocator(gAllocator);
+}
+#endif
 
+ExecutableAllocator::ExecutableAllocator(VM&)
 #if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
-void ExecutableAllocator::reprotectRegion(void* start, size_t size, ProtectionSeting setting)
+    : m_allocator(adoptPtr(new DemandExecutableAllocator()))
+#endif
+{
+    ASSERT(allocator());
+}
+
+ExecutableAllocator::~ExecutableAllocator()
 {
-    if (!pageSize)
-        intializePageSize();
+}
+
+bool ExecutableAllocator::isValid() const
+{
+    return true;
+}
+
+bool ExecutableAllocator::underMemoryPressure()
+{
+#ifdef EXECUTABLE_MEMORY_LIMIT
+    return DemandExecutableAllocator::bytesAllocatedByAllAllocators() > EXECUTABLE_MEMORY_LIMIT / 2;
+#else
+    return false;
+#endif
+}
+
+double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage)
+{
+    double result;
+#ifdef EXECUTABLE_MEMORY_LIMIT
+    size_t bytesAllocated = DemandExecutableAllocator::bytesAllocatedByAllAllocators() + addedMemoryUsage;
+    if (bytesAllocated >= EXECUTABLE_MEMORY_LIMIT)
+        bytesAllocated = EXECUTABLE_MEMORY_LIMIT;
+    result = static_cast<double>(EXECUTABLE_MEMORY_LIMIT) /
+        (EXECUTABLE_MEMORY_LIMIT - bytesAllocated);
+#else
+    UNUSED_PARAM(addedMemoryUsage);
+    result = 1.0;
+#endif
+    if (result < 1.0)
+        result = 1.0;
+    return result;
+
+}
+
+PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
+{
+    RefPtr<ExecutableMemoryHandle> result = allocator()->allocate(sizeInBytes, ownerUID);
+    RELEASE_ASSERT(result || effort != JITCompilationMustSucceed);
+    return result.release();
+}
+
+size_t ExecutableAllocator::committedByteCount()
+{
+    return DemandExecutableAllocator::bytesCommittedByAllocactors();
+}
+
+#if ENABLE(META_ALLOCATOR_PROFILE)
+void ExecutableAllocator::dumpProfile()
+{
+    DemandExecutableAllocator::dumpProfileFromAllAllocators();
+}
+#endif
+
+#endif // ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
+
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+
+#if OS(WINDOWS)
+#error "ASSEMBLER_WX_EXCLUSIVE not yet suported on this platform."
+#endif
+
+void ExecutableAllocator::reprotectRegion(void* start, size_t size, ProtectionSetting setting)
+{
+    size_t pageSize = WTF::pageSize();
 
     // Calculate the start of the page containing this region,
     // and account for this extra memory within size.
@@ -52,21 +256,7 @@ void ExecutableAllocator::reprotectRegion(void* start, size_t size, ProtectionSe
     mprotect(pageStart, size, (setting == Writable) ? PROTECTION_FLAGS_RW : PROTECTION_FLAGS_RX);
 }
 
-#endif
-
-#if CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(RVCT)
-__asm void ExecutableAllocator::cacheFlush(void* code, size_t size)
-{
-    ARM
-    push {r7}
-    add r1, r1, r0
-    mov r7, #0xf0000
-    add r7, r7, #0x2
-    mov r2, #0x0
-    svc #0x0
-    pop {r7}
-    bx lr
-}
 #endif
 
 }
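
A note on the size arithmetic in DemandExecutableAllocator::allocateNewSpace() above: the requested page count is converted to bytes, rounded up to a whole multiple of JIT_ALLOCATOR_LARGE_ALLOC_SIZE, and then converted back to pages, so every PageReservation covers whole allocation granules. The standalone sketch below is not part of the patch; it reproduces that computation with assumed constants (a 4 KB page and a 64 KB granule stand in for the real WTF::pageSize() and JIT_ALLOCATOR_LARGE_ALLOC_SIZE values) purely to illustrate the rounding.

#include <cassert>
#include <cstddef>
#include <cstdio>

// Assumed stand-ins for WTF::pageSize() and JIT_ALLOCATOR_LARGE_ALLOC_SIZE.
static const size_t kPageSize = 4096;
static const size_t kLargeAllocSize = 64 * 1024;

// Mirrors the rounding in allocateNewSpace(): bytes are rounded up to a whole
// number of large-allocation granules, then expressed as pages again.
static size_t roundUpToGranule(size_t numPages)
{
    size_t bytes = numPages * kPageSize;
    size_t roundedBytes = (bytes + kLargeAllocSize - 1) / kLargeAllocSize * kLargeAllocSize;
    return (roundedBytes + kPageSize - 1) / kPageSize;
}

int main()
{
    // A 3-page (12 KB) request still reserves one full 64 KB granule: 16 pages.
    assert(roundUpToGranule(3) == 16);
    // A 17-page (68 KB) request spills into a second granule: 32 pages.
    assert(roundUpToGranule(17) == 32);
    // Requests already on a granule boundary are unchanged.
    assert(roundUpToGranule(16) == 16);
    printf("granule rounding behaves as expected\n");
    return 0;
}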