/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef ExecutableAllocator_h
#define ExecutableAllocator_h
#include <stddef.h> // for ptrdiff_t
#include <limits> // for std::numeric_limits, used below
#include <wtf/Assertions.h>
#include <wtf/PassRefPtr.h>
#include <wtf/RefCounted.h>
#include <wtf/UnusedParam.h>
#include <wtf/Vector.h>
#if OS(IPHONE_OS)
#include <libkern/OSCacheControl.h>
#include <sys/mman.h>
#endif

#if OS(SYMBIAN)
#include <e32std.h>
#endif
#if CPU(MIPS) && OS(LINUX)
#include <sys/cachectl.h>
#endif
#if OS(WINCE)
// From pkfuncs.h (private header file from the Platform Builder)
#define CACHE_SYNC_ALL 0x07F
extern "C" __declspec(dllimport) void CacheRangeFlush(LPVOID pAddr, DWORD dwLength, DWORD dwFlags);
#endif
#define JIT_ALLOCATOR_PAGE_SIZE (ExecutableAllocator::pageSize)
#define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (ExecutableAllocator::pageSize * 4)
#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
#define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE)
#define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC)
#define INITIAL_PROTECTION_FLAGS PROTECTION_FLAGS_RX
#else
#define INITIAL_PROTECTION_FLAGS (PROT_READ | PROT_WRITE | PROT_EXEC)
#endif
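
// Note on the flags above: with ASSEMBLER_WX_EXCLUSIVE enabled, executable
// memory is mapped read+execute by default and is never writable and
// executable at the same time, so callers must bracket code emission with
// makeWritable()/makeExecutable() below. Without it, pages stay RWX for
// their whole lifetime and those calls are no-ops.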
namespace JSC {

inline size_t roundUpAllocationSize(size_t request, size_t granularity)
{
    if ((std::numeric_limits<size_t>::max() - granularity) <= request)
        CRASH(); // Allocation is too large

    // Round up to the next 'granularity' boundary
    size_t size = request + (granularity - 1);
    size = size & ~(granularity - 1);
    ASSERT(size >= request);
    return size;
}
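
// Worked example of the rounding above (illustrative values only): with
// request = 100 and granularity = 64, size = (100 + 63) & ~63 = 128; an
// already-aligned request is returned unchanged, e.g. (64 + 63) & ~63 = 64.
// This relies on 'granularity' being a power of two, which holds for both
// sizeof(void*) and the page size.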

}

#if ENABLE(ASSEMBLER)

namespace JSC {

class ExecutablePool : public RefCounted<ExecutablePool> {
private:
    struct Allocation {
        char* pages;
        size_t size;
    };
    typedef Vector<Allocation, 2> AllocationList;

public:
    static PassRefPtr<ExecutablePool> create(size_t n)
    {
        return adoptRef(new ExecutablePool(n));
    }
    void* alloc(size_t n)
    {
        ASSERT(m_freePtr <= m_end);

        // Round 'n' up to a multiple of word size; if all allocations are of
        // word sized quantities, then all subsequent allocations will be aligned.
        n = roundUpAllocationSize(n, sizeof(void*));

        if (static_cast<ptrdiff_t>(n) < (m_end - m_freePtr)) {
            void* result = m_freePtr;
            m_freePtr += n;
            return result;
        }

        // Insufficient space to allocate in the existing pool,
        // so we need to allocate into a new pool.
        return poolAllocate(n);
    }
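
    // Note: tryShrink() below can only reclaim space if 'allocation' was the
    // most recent allocation from this pool, i.e. it still abuts m_freePtr;
    // otherwise the call is a no-op and the slack is simply wasted.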
    void tryShrink(void* allocation, size_t oldSize, size_t newSize)
    {
        if (static_cast<char*>(allocation) + oldSize != m_freePtr)
            return;
        m_freePtr = static_cast<char*>(allocation) + roundUpAllocationSize(newSize, sizeof(void*));
    }
    ~ExecutablePool()
    {
        AllocationList::const_iterator end = m_pools.end();
        for (AllocationList::const_iterator ptr = m_pools.begin(); ptr != end; ++ptr)
            ExecutablePool::systemRelease(*ptr);
    }
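
    // available() reports the remaining space only while this pool is backed
    // by a single system allocation; once poolAllocate() has appended a second
    // allocation, it returns 0 so poolForSize() stops treating this object as
    // the shared small-allocation pool.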
    size_t available() const { return (m_pools.size() > 1) ? 0 : m_end - m_freePtr; }

    static bool underMemoryPressure();
private:
    static Allocation systemAlloc(size_t n);
    static void systemRelease(const Allocation& alloc);

    ExecutablePool(size_t n);

    void* poolAllocate(size_t n);

    char* m_freePtr;
    char* m_end;
    AllocationList m_pools;
};
class ExecutableAllocator {
    enum ProtectionSeting { Writable, Executable };

public:
    static size_t pageSize;
    ExecutableAllocator()
    {
        if (!pageSize)
            intializePageSize();
        if (isValid())
            m_smallAllocationPool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
#if !ENABLE(INTERPRETER)
        else
            CRASH();
#endif
    }

    bool isValid() const;
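
    // poolForSize() picks one of three strategies: reuse the shared small
    // pool when the request fits, hand out a dedicated pool for requests
    // larger than JIT_ALLOCATOR_LARGE_ALLOC_SIZE, and otherwise start a fresh
    // small pool (possibly adopting it as the new shared pool).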
    PassRefPtr<ExecutablePool> poolForSize(size_t n)
    {
        // Try to fit in the existing small allocator
        ASSERT(m_smallAllocationPool);
        if (n < m_smallAllocationPool->available())
            return m_smallAllocationPool;

        // If the request is large, we just provide an unshared allocator
        if (n > JIT_ALLOCATOR_LARGE_ALLOC_SIZE)
            return ExecutablePool::create(n);

        // Create a new allocator
        RefPtr<ExecutablePool> pool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);

        // If the new allocator will result in more free space than in
        // the current small allocator, then we will use it instead.
        if ((pool->available() - n) > m_smallAllocationPool->available())
            m_smallAllocationPool = pool;
        return pool.release();
    }

#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
    static void makeWritable(void* start, size_t size)
    {
        reprotectRegion(start, size, Writable);
    }

    static void makeExecutable(void* start, size_t size)
    {
        reprotectRegion(start, size, Executable);
    }
#else
    static void makeWritable(void*, size_t) {}
    static void makeExecutable(void*, size_t) {}
#endif
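
    // cacheFlush() makes freshly written machine code visible to the
    // instruction fetch path. On x86/x86_64 the data and instruction caches
    // are coherent, so it is a no-op; the other ports below invalidate the
    // instruction cache for the given range through an OS- or
    // compiler-provided primitive.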
#if CPU(X86) || CPU(X86_64)
    static void cacheFlush(void*, size_t)
    {
    }
#elif CPU(MIPS)
    static void cacheFlush(void* code, size_t size)
    {
#if COMPILER(GCC) && (GCC_VERSION >= 40300)
#if WTF_MIPS_ISA_REV(2) && (GCC_VERSION < 40403)
        int lineSize;
        asm("rdhwr %0, $1" : "=r" (lineSize));
        //
        // Modify "start" and "end" to avoid the GCC 4.3.0-4.4.2 bug in
        // mips_expand_synci_loop that may execute synci one more time.
        // "start" points to the first byte of the cache line.
        // "end" points to the last byte of the line before the last cache line.
        // Because size is always a multiple of 4, it is safe to set
        // "end" to the last byte.
        //
        intptr_t start = reinterpret_cast<intptr_t>(code) & (-lineSize);
        intptr_t end = ((reinterpret_cast<intptr_t>(code) + size - 1) & (-lineSize)) - 1;
        __builtin___clear_cache(reinterpret_cast<char*>(start), reinterpret_cast<char*>(end));
#else
        intptr_t end = reinterpret_cast<intptr_t>(code) + size;
        __builtin___clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(end));
#endif
#else
        _flush_cache(reinterpret_cast<char*>(code), size, BCACHE);
#endif
    }
#elif CPU(ARM_THUMB2) && OS(IPHONE_OS)
    static void cacheFlush(void* code, size_t size)
    {
        sys_dcache_flush(code, size);
        sys_icache_invalidate(code, size);
    }
#elif CPU(ARM_THUMB2) && OS(LINUX)
    static void cacheFlush(void* code, size_t size)
    {
        // Invoke the Linux cacheflush system call; r7 holds the syscall
        // number (0x0f0002), r0/r1 the start and end of the range.
        asm volatile (
            "push    {r7}\n"
            "mov     r0, %0\n"
            "mov     r1, %1\n"
            "movw    r7, #0x2\n"
            "movt    r7, #0xf\n"
            "mov     r2, #0x0\n"
            "svc     0x0\n"
            "pop     {r7}\n"
            :
            : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
            : "r0", "r1", "r2");
    }
#elif OS(SYMBIAN)
    static void cacheFlush(void* code, size_t size)
    {
        User::IMB_Range(code, static_cast<char*>(code) + size);
    }
#elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(RVCT)
    static __asm void cacheFlush(void* code, size_t size);
#elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(GCC)
    static void cacheFlush(void* code, size_t size)
    {
        // Same cacheflush system call as the Thumb-2 variant above, built
        // without movw/movt since ARM mode on older cores lacks them.
        asm volatile (
            "push    {r7}\n"
            "mov     r0, %0\n"
            "mov     r1, %1\n"
            "mov     r7, #0xf0000\n"
            "add     r7, r7, #0x2\n"
            "mov     r2, #0x0\n"
            "svc     0x0\n"
            "pop     {r7}\n"
            :
            : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
            : "r0", "r1", "r2");
    }
#elif OS(WINCE)
    static void cacheFlush(void* code, size_t size)
    {
        CacheRangeFlush(code, size, CACHE_SYNC_ALL);
    }
#else
    #error "The cacheFlush support is missing on this platform."
#endif

private:

#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
    static void reprotectRegion(void*, size_t, ProtectionSeting);
#endif
    RefPtr<ExecutablePool> m_smallAllocationPool;
    static void intializePageSize();
};
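
// A minimal usage sketch (illustrative only; 'codeSize' and the copy step are
// placeholders for whatever a JIT client actually emits):
//
//     ExecutableAllocator allocator;
//     RefPtr<ExecutablePool> pool = allocator.poolForSize(codeSize);
//     void* code = pool->alloc(codeSize);
//     ExecutableAllocator::makeWritable(code, codeSize);
//     // ... copy the generated machine code into 'code' ...
//     ExecutableAllocator::makeExecutable(code, codeSize);
//     ExecutableAllocator::cacheFlush(code, codeSize);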
inline ExecutablePool::ExecutablePool(size_t n)
{
    size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
    Allocation mem = systemAlloc(allocSize);
    m_pools.append(mem);
    m_freePtr = mem.pages;
    if (!m_freePtr)
        CRASH(); // Failed to allocate
    m_end = m_freePtr + allocSize;
}
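
// poolAllocate() services a request that did not fit in the current backing
// allocation: it grabs a new system allocation sized for the request and, if
// that new allocation has more slack left over than the old one, makes it the
// pool's active bump region. Either way the allocation is recorded in m_pools
// so the destructor can release it.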
inline void* ExecutablePool::poolAllocate(size_t n)
{
    size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);

    Allocation result = systemAlloc(allocSize);
    if (!result.pages)
        CRASH(); // Failed to allocate

    ASSERT(m_end >= m_freePtr);
    if ((allocSize - n) > static_cast<size_t>(m_end - m_freePtr)) {
        // Replace the active allocation pool
        m_freePtr = result.pages + n;
        m_end = result.pages + allocSize;
    }

    m_pools.append(result);
    return result.pages;
}

}
#endif // ENABLE(ASSEMBLER)

#endif // !defined(ExecutableAllocator_h)