/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef ExecutableAllocator_h
#define ExecutableAllocator_h

#include <stddef.h> // for ptrdiff_t
#include <limits>   // for std::numeric_limits, used by roundUpAllocationSize
#include <wtf/Assertions.h>
#include <wtf/PassRefPtr.h>
#include <wtf/RefCounted.h>
#include <wtf/UnusedParam.h>
#include <wtf/Vector.h>

#if OS(IPHONE_OS)
#include <libkern/OSCacheControl.h>
#include <sys/mman.h>
#endif

#if OS(SYMBIAN)
#include <e32std.h>
#endif

#if CPU(MIPS) && OS(LINUX)
#include <sys/cachectl.h>
#endif

#if OS(WINCE)
// From pkfuncs.h (private header file from the Platform Builder)
#define CACHE_SYNC_ALL 0x07F
extern "C" __declspec(dllimport) void CacheRangeFlush(LPVOID pAddr, DWORD dwLength, DWORD dwFlags);
#endif

#define JIT_ALLOCATOR_PAGE_SIZE (ExecutableAllocator::pageSize)
#define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (ExecutableAllocator::pageSize * 4)
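
// Note: both values are resolved at runtime through ExecutableAllocator::pageSize;
// with a typical 4 KiB page size, JIT_ALLOCATOR_LARGE_ALLOC_SIZE works out to 16 KiB.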

#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
#define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE)
#define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC)
#define INITIAL_PROTECTION_FLAGS PROTECTION_FLAGS_RX
#else
#define INITIAL_PROTECTION_FLAGS (PROT_READ | PROT_WRITE | PROT_EXEC)
#endif
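
// When ASSEMBLER_WX_EXCLUSIVE is enabled, JIT pages follow a W^X policy: they
// are mapped read/execute by default and are flipped to read/write only around
// each code write (see makeWritable()/makeExecutable() below). Otherwise pages
// remain read/write/execute for their whole lifetime.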

namespace JSC {

inline size_t roundUpAllocationSize(size_t request, size_t granularity)
{
    if ((std::numeric_limits<size_t>::max() - granularity) <= request)
        CRASH(); // Allocation is too large

    // Round up to the next boundary that is a multiple of 'granularity'.
    size_t size = request + (granularity - 1);
    size = size & ~(granularity - 1);
    ASSERT(size >= request);
    return size;
}
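
// Illustrative examples (the mask arithmetic above assumes 'granularity' is a
// power of two): with a granularity of 4096,
//   roundUpAllocationSize(1, 4096)    == 4096
//   roundUpAllocationSize(4096, 4096) == 4096
//   roundUpAllocationSize(4097, 4096) == 8192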

}

#if ENABLE(ASSEMBLER)

namespace JSC {

class ExecutablePool : public RefCounted<ExecutablePool> {
private:
    struct Allocation {
        char* pages;
        size_t size;
#if OS(SYMBIAN)
        RChunk* chunk;
#endif
    };
    typedef Vector<Allocation, 2> AllocationList;

public:
    static PassRefPtr<ExecutablePool> create(size_t n)
    {
        return adoptRef(new ExecutablePool(n));
    }

    void* alloc(size_t n)
    {
        ASSERT(m_freePtr <= m_end);

        // Round 'n' up to a multiple of the word size; if all allocations are
        // of word-sized quantities, then all subsequent allocations will be
        // word aligned.
        n = roundUpAllocationSize(n, sizeof(void*));

        if (static_cast<ptrdiff_t>(n) < (m_end - m_freePtr)) {
            void* result = m_freePtr;
            m_freePtr += n;
            return result;
        }

        // Insufficient space to allocate in the existing pool,
        // so we need to allocate into a new pool.
        return poolAllocate(n);
    }
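
    // Illustrative example: on a 64-bit target (sizeof(void*) == 8), a request
    // of n == 20 is rounded up to 24, so m_freePtr always advances in whole
    // words and every pointer handed out stays word aligned.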

    ~ExecutablePool()
    {
        AllocationList::const_iterator end = m_pools.end();
        for (AllocationList::const_iterator ptr = m_pools.begin(); ptr != end; ++ptr)
            ExecutablePool::systemRelease(*ptr);
    }
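
    // Once a pool has spilled into a second underlying allocation, available()
    // reports 0, so ExecutableAllocator::poolForSize stops routing further
    // small requests to it.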
    size_t available() const { return (m_pools.size() > 1) ? 0 : m_end - m_freePtr; }

private:
    static Allocation systemAlloc(size_t n);
    static void systemRelease(const Allocation& alloc);

    ExecutablePool(size_t n);

    void* poolAllocate(size_t n);

    char* m_freePtr;
    char* m_end;
    AllocationList m_pools;
};

class ExecutableAllocator {
    enum ProtectionSetting { Writable, Executable };

public:
    static size_t pageSize;
    ExecutableAllocator()
    {
        if (!pageSize)
            initializePageSize();
        if (isValid())
            m_smallAllocationPool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
#if !ENABLE(INTERPRETER)
        else
            CRASH();
#endif
    }

    bool isValid() const;

    PassRefPtr<ExecutablePool> poolForSize(size_t n)
    {
        // Try to fit in the existing small allocator.
        ASSERT(m_smallAllocationPool);
        if (n < m_smallAllocationPool->available())
            return m_smallAllocationPool;

        // If the request is large, we just provide an unshared allocator.
        if (n > JIT_ALLOCATOR_LARGE_ALLOC_SIZE)
            return ExecutablePool::create(n);

        // Create a new allocator.
        RefPtr<ExecutablePool> pool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);

        // If the new allocator will result in more free space than the
        // current small allocator, then we will use it instead.
        if ((pool->available() - n) > m_smallAllocationPool->available())
            m_smallAllocationPool = pool;
        return pool.release();
    }
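
    // Worked example: with a 4 KiB page size, JIT_ALLOCATOR_LARGE_ALLOC_SIZE
    // is 16 KiB. A 20 KiB request gets a dedicated, unshared pool; a 1 KiB
    // request comes out of the shared small pool while it has room, and
    // otherwise out of a fresh 16 KiB pool that may take over as the shared
    // one if it would leave more free space behind.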

#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
    static void makeWritable(void* start, size_t size)
    {
        reprotectRegion(start, size, Writable);
    }

    static void makeExecutable(void* start, size_t size)
    {
        reprotectRegion(start, size, Executable);
    }
#else
    static void makeWritable(void*, size_t) {}
    static void makeExecutable(void*, size_t) {}
#endif
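
    // On architectures whose instruction caches are not coherent with data
    // writes, freshly emitted machine code must be written back from the data
    // cache and the matching instruction-cache lines invalidated before the
    // code runs; each platform below supplies its own cacheFlush primitive.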

#if CPU(X86) || CPU(X86_64)
    static void cacheFlush(void*, size_t)
    {
        // No explicit flush is needed: x86 keeps instruction fetch coherent
        // with data writes.
    }
#elif CPU(MIPS)
    static void cacheFlush(void* code, size_t size)
    {
#if COMPILER(GCC) && (GCC_VERSION >= 40300)
#if WTF_MIPS_ISA_REV(2) && (GCC_VERSION < 40403)
        int lineSize;
        asm("rdhwr %0, $1" : "=r" (lineSize));
        //
        // Modify "start" and "end" to avoid a GCC 4.3.0-4.4.2 bug in
        // mips_expand_synci_loop that may execute synci one more time.
        // "start" points to the first byte of the cache line.
        // "end" points to the last byte of the line before the last cache line.
        // Because size is always a multiple of 4, it is safe to set
        // "end" to the last byte.
        //
        intptr_t start = reinterpret_cast<intptr_t>(code) & (-lineSize);
        intptr_t end = ((reinterpret_cast<intptr_t>(code) + size - 1) & (-lineSize)) - 1;
        __builtin___clear_cache(reinterpret_cast<char*>(start), reinterpret_cast<char*>(end));
#else
        intptr_t end = reinterpret_cast<intptr_t>(code) + size;
        __builtin___clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(end));
#endif
#else
        _flush_cache(reinterpret_cast<char*>(code), size, BCACHE);
#endif
    }
#elif CPU(ARM_THUMB2) && OS(IPHONE_OS)
    static void cacheFlush(void* code, size_t size)
    {
        // Write the new code back from the data cache, then invalidate any
        // stale instruction-cache lines for the same range.
        sys_dcache_flush(code, size);
        sys_icache_invalidate(code, size);
    }
#elif CPU(ARM_THUMB2) && OS(LINUX)
    static void cacheFlush(void* code, size_t size)
    {
        // Invoke the private ARM Linux cacheflush syscall (0xf0002) on the
        // range [code, code + size).
        asm volatile (
            "push    {r7}\n"
            "mov     r0, %0\n"
            "mov     r1, %1\n"
            "movw    r7, #0x2\n"
            "movt    r7, #0xf\n"
            "movs    r2, #0x0\n"
            "svc     0x0\n"
            "pop     {r7}\n"
            :
            : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
            : "r0", "r1", "r2");
    }
#elif OS(SYMBIAN)
    static void cacheFlush(void* code, size_t size)
    {
        User::IMB_Range(code, static_cast<char*>(code) + size);
    }
#elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(RVCT)
    static __asm void cacheFlush(void* code, size_t size);
#elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(GCC)
    static void cacheFlush(void* code, size_t size)
    {
        // Same cacheflush syscall as above, with the syscall number built
        // without movw/movt, which are unavailable in traditional ARM mode.
        asm volatile (
            "push    {r7}\n"
            "mov     r0, %0\n"
            "mov     r1, %1\n"
            "mov     r7, #0xf0000\n"
            "add     r7, r7, #0x2\n"
            "mov     r2, #0x0\n"
            "svc     0x0\n"
            "pop     {r7}\n"
            :
            : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
            : "r0", "r1", "r2");
    }
#elif OS(WINCE)
    static void cacheFlush(void* code, size_t size)
    {
        CacheRangeFlush(code, size, CACHE_SYNC_ALL);
    }
#else
    #error "The cacheFlush support is missing on this platform."
#endif
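
    // Typical client flow, sketched for illustration (names are hypothetical):
    //
    //   ExecutableAllocator allocator;
    //   RefPtr<ExecutablePool> pool = allocator.poolForSize(codeSize);
    //   void* code = pool->alloc(codeSize);
    //   ExecutableAllocator::makeWritable(code, codeSize);
    //   ... copy the generated machine code into 'code' ...
    //   ExecutableAllocator::makeExecutable(code, codeSize);
    //   ExecutableAllocator::cacheFlush(code, codeSize);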

private:

#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
    static void reprotectRegion(void*, size_t, ProtectionSetting);
#endif

    RefPtr<ExecutablePool> m_smallAllocationPool;
    static void initializePageSize();
};

inline ExecutablePool::ExecutablePool(size_t n)
{
    size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
    Allocation mem = systemAlloc(allocSize);
    m_pools.append(mem);
    m_freePtr = mem.pages;
    if (!m_freePtr)
        CRASH(); // Failed to allocate
    m_end = m_freePtr + allocSize;
}

inline void* ExecutablePool::poolAllocate(size_t n)
{
    size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);

    Allocation result = systemAlloc(allocSize);
    if (!result.pages)
        CRASH(); // Failed to allocate

    ASSERT(m_end >= m_freePtr);
    if ((allocSize - n) > static_cast<size_t>(m_end - m_freePtr)) {
        // The fresh allocation has more space left over than the current
        // pool, so switch the bump pointer to it.
        m_freePtr = result.pages + n;
        m_end = result.pages + allocSize;
    }

    m_pools.append(result);
    return result.pages;
}
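
// Illustrative example of the pool-replacement heuristic above: with 4 KiB
// pages, suppose the current allocation has 100 bytes free and a 5000-byte
// request arrives. poolAllocate obtains an 8192-byte allocation; the
// 8192 - 5000 = 3192 bytes left over exceed the 100 bytes remaining, so the
// bump pointer switches to the new allocation.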

}

#endif // ENABLE(ASSEMBLER)

#endif // !defined(ExecutableAllocator_h)