#ifndef ExecutableAllocator_h
#define ExecutableAllocator_h
-
+#include "JITCompilationEffort.h"
+#include <stddef.h> // for ptrdiff_t
#include <limits>
#include <wtf/Assertions.h>
+#include <wtf/MetaAllocator.h>
+#include <wtf/MetaAllocatorHandle.h>
+#include <wtf/OwnPtr.h>
+#include <wtf/PageAllocation.h>
#include <wtf/PassRefPtr.h>
#include <wtf/RefCounted.h>
-#include <wtf/UnusedParam.h>
#include <wtf/Vector.h>
+#if OS(IOS)
#include <libkern/OSCacheControl.h>
+#endif
+
+#if OS(IOS) || OS(QNX)
#include <sys/mman.h>
+#endif
+
+#if CPU(MIPS) && OS(LINUX)
+#include <sys/cachectl.h>
+#endif
+
+#if CPU(SH4) && OS(LINUX)
+#include <asm/cachectl.h>
+#include <asm/unistd.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+#endif
+
+#if OS(WINCE)
+// From pkfuncs.h (private header file from the Platform Builder)
+#define CACHE_SYNC_ALL 0x07F
+extern "C" __declspec(dllimport) void CacheRangeFlush(LPVOID pAddr, DWORD dwLength, DWORD dwFlags);
+#endif
-#define JIT_ALLOCATOR_PAGE_SIZE (ExecutableAllocator::pageSize)
-#define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (ExecutableAllocator::pageSize * 4)
+#define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (pageSize() * 4)
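+// pageSize() is WTF::pageSize(), picked up through wtf/PageAllocation.h,
+// replacing the old static ExecutableAllocator::pageSize member.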
#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
#define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE)
#define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC)
-#define INITIAL_PROTECTION_FLAGS PROTECTION_FLAGS_RX
+#define EXECUTABLE_POOL_WRITABLE false
#else
-#define INITIAL_PROTECTION_FLAGS (PROT_READ | PROT_WRITE | PROT_EXEC)
+#define EXECUTABLE_POOL_WRITABLE true
#endif
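+// Under ASSEMBLER_WX_EXCLUSIVE, pools start out read+execute and must be
+// flipped to read+write (makeWritable below) around code patching; otherwise
+// pages stay RWX and pools are writable from the moment they are created.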
namespace JSC {
+class VM;
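+// Defined elsewhere; asks the VM to throw away compiled code when the
+// executable pool is exhausted.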
+void releaseExecutableMemory(VM&);
+
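+// Granule from which the MetaAllocator carves individual allocations.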
+static const unsigned jitAllocationGranule = 32;
+
inline size_t roundUpAllocationSize(size_t request, size_t granularity)
{
- if ((std::numeric_limits<size_t>::max() - granularity) <= request)
- CRASH(); // Allocation is too large
+ RELEASE_ASSERT((std::numeric_limits<size_t>::max() - granularity) > request);
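+    // E.g. roundUpAllocationSize(5, 32) == 32 and roundUpAllocationSize(32, 32) == 32;
+    // the mask below requires granularity to be a power of two.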
    // Round up to the next granularity boundary
    size_t size = request + (granularity - 1);
    size = size & ~(granularity - 1);
    ASSERT(size >= request);
    return size;
}

}
-#if ENABLE(ASSEMBLER)
-
namespace JSC {
-class ExecutablePool : public RefCounted<ExecutablePool> {
-private:
- struct Allocation {
- char* pages;
- size_t size;
- };
- typedef Vector<Allocation, 2> AllocationList;
+typedef WTF::MetaAllocatorHandle ExecutableMemoryHandle;
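+// A MetaAllocatorHandle is a reference-counted lease on a range of executable
+// memory; releasing the last reference returns the range to its MetaAllocator.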
-public:
- static PassRefPtr<ExecutablePool> create(size_t n)
- {
- return adoptRef(new ExecutablePool(n));
- }
-
- void* alloc(size_t n)
- {
- ASSERT(m_freePtr <= m_end);
-
- // Round 'n' up to a multiple of word size; if all allocations are of
- // word sized quantities, then all subsequent allocations will be aligned.
- n = roundUpAllocationSize(n, sizeof(void*));
-
- if (static_cast<ptrdiff_t>(n) < (m_end - m_freePtr)) {
- void* result = m_freePtr;
- m_freePtr += n;
- return result;
- }
-
- // Insufficient space to allocate in the existing pool
- // so we need allocate into a new pool
- return poolAllocate(n);
- }
-
- ~ExecutablePool()
- {
- AllocationList::const_iterator end = m_pools.end();
- for (AllocationList::const_iterator ptr = m_pools.begin(); ptr != end; ++ptr)
- ExecutablePool::systemRelease(*ptr);
- }
-
- size_t available() const { return (m_pools.size() > 1) ? 0 : m_end - m_freePtr; }
-
-private:
- static Allocation systemAlloc(size_t n);
- static void systemRelease(const Allocation& alloc);
+#if ENABLE(JIT) && ENABLE(ASSEMBLER)
- ExecutablePool(size_t n);
+#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
+class DemandExecutableAllocator;
+#endif
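+// The demand allocator grows executable memory a chunk at a time as code is
+// compiled; the fixed allocator below instead reserves one large virtual
+// region at startup.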
- void* poolAllocate(size_t n);
+#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED)
+#if CPU(ARM) || CPU(ARM64)
+static const size_t fixedExecutableMemoryPoolSize = 16 * 1024 * 1024;
+#elif CPU(X86_64)
+static const size_t fixedExecutableMemoryPoolSize = 1024 * 1024 * 1024;
+#else
+static const size_t fixedExecutableMemoryPoolSize = 32 * 1024 * 1024;
+#endif
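+// The sizes presumably keep JIT-to-JIT calls within near-branch range: small
+// pools where branch displacement is limited (ARM), a full 1GB on x86-64 where
+// 32-bit relative jumps span the pool.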
- char* m_freePtr;
- char* m_end;
- AllocationList m_pools;
-};
+extern uintptr_t startOfFixedExecutableMemoryPool;
+#endif
class ExecutableAllocator {
- enum ProtectionSeting { Writable, Executable };
+ enum ProtectionSetting { Writable, Executable };
public:
- static size_t pageSize;
- ExecutableAllocator()
- {
- if (!pageSize)
- intializePageSize();
- m_smallAllocationPool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
- }
+ ExecutableAllocator(VM&);
+ ~ExecutableAllocator();
+
+ static void initializeAllocator();
- PassRefPtr<ExecutablePool> poolForSize(size_t n)
- {
- // Try to fit in the existing small allocator
- if (n < m_smallAllocationPool->available())
- return m_smallAllocationPool;
-
- // If the request is large, we just provide a unshared allocator
- if (n > JIT_ALLOCATOR_LARGE_ALLOC_SIZE)
- return ExecutablePool::create(n);
-
- // Create a new allocator
- RefPtr<ExecutablePool> pool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
-
- // If the new allocator will result in more free space than in
- // the current small allocator, then we will use it instead
- if ((pool->available() - n) > m_smallAllocationPool->available())
- m_smallAllocationPool = pool;
- return pool.release();
- }
+ bool isValid() const;
+
+ static bool underMemoryPressure();
+
+ static double memoryPressureMultiplier(size_t addedMemoryUsage);
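+    // (Assumption: both pressure hooks feed the JIT's tiering heuristics,
+    // throttling new compilation as the executable pool fills up.)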
+
+#if ENABLE(META_ALLOCATOR_PROFILE)
+ static void dumpProfile();
+#else
+ static void dumpProfile() { }
+#endif
+
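+    // ownerUID tags the allocation for diagnostics; the JITCompilationEffort
+    // flag (JITCompilationCanFail vs. JITCompilationMustSucceed) presumably
+    // decides whether a failed allocation returns null or is fatal.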
+ PassRefPtr<ExecutableMemoryHandle> allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort);
#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
    static void makeWritable(void* start, size_t size) { reprotectRegion(start, size, Writable); }
    static void makeExecutable(void* start, size_t size) { reprotectRegion(start, size, Executable); }
#else
    static void makeWritable(void*, size_t) {}
    static void makeExecutable(void*, size_t) {}
#endif
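+    // Hypothetical usage in a WX-exclusive build: call makeWritable(p, n)
+    // before patching generated code and makeExecutable(p, n) afterwards.
+    // In non-exclusive builds both calls collapse to no-ops.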
-
-#if PLATFORM(X86) || PLATFORM(X86_64)
- static void cacheFlush(void*, size_t)
- {
- }
-#elif PLATFORM_ARM_ARCH(7) && PLATFORM(IPHONE)
- static void cacheFlush(void* code, size_t size)
- {
- sys_dcache_flush(code, size);
- sys_icache_invalidate(code, size);
- }
-#else
-#error "ExecutableAllocator::cacheFlush not implemented on this platform."
-#endif
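+    // Total bytes currently committed across executable pools; presumably
+    // feeds memory-statistics reporting.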
+ static size_t committedByteCount();
private:
#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
- static void reprotectRegion(void*, size_t, ProtectionSeting);
+ static void reprotectRegion(void*, size_t, ProtectionSetting);
#endif
+
+#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
+    // We create a MetaAllocator for each VM.
+    OwnPtr<DemandExecutableAllocator> m_allocator;
+    DemandExecutableAllocator* allocator() { return m_allocator.get(); }
+#endif
- RefPtr<ExecutablePool> m_smallAllocationPool;
- static void intializePageSize();
};
-inline ExecutablePool::ExecutablePool(size_t n)
-{
- size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
- Allocation mem = systemAlloc(allocSize);
- m_pools.append(mem);
- m_freePtr = mem.pages;
- if (!m_freePtr)
- CRASH(); // Failed to allocate
- m_end = m_freePtr + allocSize;
-}
-inline void* ExecutablePool::poolAllocate(size_t n)
-{
- size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
-
- Allocation result = systemAlloc(allocSize);
- if (!result.pages)
- CRASH(); // Failed to allocate
-
- ASSERT(m_end >= m_freePtr);
- if ((allocSize - n) > static_cast<size_t>(m_end - m_freePtr)) {
- // Replace allocation pool
- m_freePtr = result.pages + n;
- m_end = result.pages + allocSize;
- }
-    m_pools.append(result);
-    return result.pages;
-}
-}
-#endif // ENABLE(ASSEMBLER)
+#else
+
+#if PLATFORM(IOS)
+class ExecutableAllocator {
+public:
+ static size_t committedByteCount();
+};
+
+#endif // PLATFORM(IOS)
+
+#endif // ENABLE(JIT) && ENABLE(ASSEMBLER)
+
+} // namespace JSC
#endif // !defined(ExecutableAllocator)