2 * Copyright (C) 2008 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 #ifndef ExecutableAllocator_h
27 #define ExecutableAllocator_h
#include <wtf/Assertions.h>
#include <wtf/PassRefPtr.h>
#include <wtf/RefCounted.h>
#include <wtf/UnusedParam.h>
#include <wtf/Vector.h>

#include <limits>

#include <libkern/OSCacheControl.h>
39 #define JIT_ALLOCATOR_PAGE_SIZE (ExecutableAllocator::pageSize)
40 #define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (ExecutableAllocator::pageSize * 4)
42 #if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
43 #define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE)
44 #define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC)
45 #define INITIAL_PROTECTION_FLAGS PROTECTION_FLAGS_RX
47 #define INITIAL_PROTECTION_FLAGS (PROT_READ | PROT_WRITE | PROT_EXEC)
52 inline size_t roundUpAllocationSize(size_t request
, size_t granularity
)
54 if ((std::numeric_limits
<size_t>::max() - granularity
) <= request
)
55 CRASH(); // Allocation is too large
57 // Round up to next page boundary
58 size_t size
= request
+ (granularity
- 1);
59 size
= size
& ~(granularity
- 1);
60 ASSERT(size
>= request
);
70 class ExecutablePool
: public RefCounted
<ExecutablePool
> {
76 typedef Vector
<Allocation
, 2> AllocationList
;
79 static PassRefPtr
<ExecutablePool
> create(size_t n
)
81 return adoptRef(new ExecutablePool(n
));
86 ASSERT(m_freePtr
<= m_end
);
88 // Round 'n' up to a multiple of word size; if all allocations are of
89 // word sized quantities, then all subsequent allocations will be aligned.
90 n
= roundUpAllocationSize(n
, sizeof(void*));
92 if (static_cast<ptrdiff_t>(n
) < (m_end
- m_freePtr
)) {
93 void* result
= m_freePtr
;
98 // Insufficient space to allocate in the existing pool
99 // so we need allocate into a new pool
100 return poolAllocate(n
);
105 AllocationList::const_iterator end
= m_pools
.end();
106 for (AllocationList::const_iterator ptr
= m_pools
.begin(); ptr
!= end
; ++ptr
)
107 ExecutablePool::systemRelease(*ptr
);
110 size_t available() const { return (m_pools
.size() > 1) ? 0 : m_end
- m_freePtr
; }
113 static Allocation
systemAlloc(size_t n
);
114 static void systemRelease(const Allocation
& alloc
);
116 ExecutablePool(size_t n
);
118 void* poolAllocate(size_t n
);
122 AllocationList m_pools
;
125 class ExecutableAllocator
{
126 enum ProtectionSeting
{ Writable
, Executable
};
129 static size_t pageSize
;
130 ExecutableAllocator()
134 m_smallAllocationPool
= ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE
);
137 PassRefPtr
<ExecutablePool
> poolForSize(size_t n
)
139 // Try to fit in the existing small allocator
140 if (n
< m_smallAllocationPool
->available())
141 return m_smallAllocationPool
;
143 // If the request is large, we just provide a unshared allocator
144 if (n
> JIT_ALLOCATOR_LARGE_ALLOC_SIZE
)
145 return ExecutablePool::create(n
);
147 // Create a new allocator
148 RefPtr
<ExecutablePool
> pool
= ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE
);
150 // If the new allocator will result in more free space than in
151 // the current small allocator, then we will use it instead
152 if ((pool
->available() - n
) > m_smallAllocationPool
->available())
153 m_smallAllocationPool
= pool
;
154 return pool
.release();
157 #if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
158 static void makeWritable(void* start
, size_t size
)
160 reprotectRegion(start
, size
, Writable
);
163 static void makeExecutable(void* start
, size_t size
)
165 reprotectRegion(start
, size
, Executable
);
168 static void makeWritable(void*, size_t) {}
169 static void makeExecutable(void*, size_t) {}
173 #if PLATFORM(X86) || PLATFORM(X86_64)
174 static void cacheFlush(void*, size_t)
177 #elif PLATFORM_ARM_ARCH(7) && PLATFORM(IPHONE)
178 static void cacheFlush(void* code
, size_t size
)
180 sys_dcache_flush(code
, size
);
181 sys_icache_invalidate(code
, size
);
184 #error "ExecutableAllocator::cacheFlush not implemented on this platform."
189 #if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
190 static void reprotectRegion(void*, size_t, ProtectionSeting
);
193 RefPtr
<ExecutablePool
> m_smallAllocationPool
;
194 static void intializePageSize();
197 inline ExecutablePool::ExecutablePool(size_t n
)
199 size_t allocSize
= roundUpAllocationSize(n
, JIT_ALLOCATOR_PAGE_SIZE
);
200 Allocation mem
= systemAlloc(allocSize
);
202 m_freePtr
= mem
.pages
;
204 CRASH(); // Failed to allocate
205 m_end
= m_freePtr
+ allocSize
;
208 inline void* ExecutablePool::poolAllocate(size_t n
)
210 size_t allocSize
= roundUpAllocationSize(n
, JIT_ALLOCATOR_PAGE_SIZE
);
212 Allocation result
= systemAlloc(allocSize
);
214 CRASH(); // Failed to allocate
216 ASSERT(m_end
>= m_freePtr
);
217 if ((allocSize
- n
) > static_cast<size_t>(m_end
- m_freePtr
)) {
218 // Replace allocation pool
219 m_freePtr
= result
.pages
+ n
;
220 m_end
= result
.pages
+ allocSize
;
223 m_pools
.append(result
);
228 #endif // ENABLE(ASSEMBLER)
230 #endif // !defined(ExecutableAllocator)