+#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
+
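+// A MetaAllocator subclass that obtains executable memory on demand: address
+// space is reserved in large chunks, and individual pages are committed and
+// decommitted only as they are handed out and returned.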
+class DemandExecutableAllocator : public MetaAllocator {
+public:
+ DemandExecutableAllocator()
+ : MetaAllocator(jitAllocationGranule)
+ {
+ std::lock_guard<std::mutex> lock(allocatorsMutex());
+ allocators().add(this);
+ // Don't preallocate any memory here.
+ }
+
+ virtual ~DemandExecutableAllocator()
+ {
+ {
+ std::lock_guard<std::mutex> lock(allocatorsMutex());
+ allocators().remove(this);
+ }
+        for (auto& reservation : reservations)
+            reservation.deallocate();
+ }
+
+ static size_t bytesAllocatedByAllAllocators()
+ {
+ size_t total = 0;
+ std::lock_guard<std::mutex> lock(allocatorsMutex());
+        for (DemandExecutableAllocator* allocator : allocators())
+            total += allocator->bytesAllocated();
+ return total;
+ }
+
+    static size_t bytesCommittedByAllAllocators()
+ {
+ size_t total = 0;
+ std::lock_guard<std::mutex> lock(allocatorsMutex());
+        for (DemandExecutableAllocator* allocator : allocators())
+            total += allocator->bytesCommitted();
+ return total;
+ }
+
+#if ENABLE(META_ALLOCATOR_PROFILE)
+ static void dumpProfileFromAllAllocators()
+ {
+ std::lock_guard<std::mutex> lock(allocatorsMutex());
+        for (DemandExecutableAllocator* allocator : allocators())
+            allocator->dumpProfile();
+ }
+#endif
+
+protected:
+ virtual void* allocateNewSpace(size_t& numPages)
+ {
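+        // Round the request up to a whole number of JIT_ALLOCATOR_LARGE_ALLOC_SIZE
+        // chunks, then convert that byte count back into pages, rounding up.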
+ size_t newNumPages = (((numPages * pageSize() + JIT_ALLOCATOR_LARGE_ALLOC_SIZE - 1) / JIT_ALLOCATOR_LARGE_ALLOC_SIZE * JIT_ALLOCATOR_LARGE_ALLOC_SIZE) + pageSize() - 1) / pageSize();
+
+ ASSERT(newNumPages >= numPages);
+
+ numPages = newNumPages;
+
+#ifdef EXECUTABLE_MEMORY_LIMIT
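+        // Refuse to grow once the global cap on executable memory is reached.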
+ if (bytesAllocatedByAllAllocators() >= EXECUTABLE_MEMORY_LIMIT)
+ return 0;
+#endif
+
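+        // Reserve address space only; individual pages are committed later,
+        // in notifyNeedPage(), once they are actually handed out.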
+ PageReservation reservation = PageReservation::reserve(numPages * pageSize(), OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
+ RELEASE_ASSERT(reservation);
+
+ reservations.append(reservation);
+
+ return reservation.base();
+ }
+
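+    // The MetaAllocator invokes these callbacks as pages come into and go out
+    // of use, so physical memory is committed lazily and released when free.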
+ virtual void notifyNeedPage(void* page)
+ {
+ OSAllocator::commit(page, pageSize(), EXECUTABLE_POOL_WRITABLE, true);
+ }
+
+ virtual void notifyPageIsFree(void* page)
+ {
+ OSAllocator::decommit(page, pageSize());
+ }
+
+private:
+ Vector<PageReservation, 16> reservations;
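+
+    // Registry of every live DemandExecutableAllocator, used by the static
+    // accounting helpers above; all access is guarded by allocatorsMutex().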
+    static HashSet<DemandExecutableAllocator*>& allocators()
+    {
+        static NeverDestroyed<HashSet<DemandExecutableAllocator*>> set;
+        return set;
+    }
+
+ static std::mutex& allocatorsMutex()
+ {
+        static NeverDestroyed<std::mutex> mutex;
+        return mutex;
+ }
+};