/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef ExecutableAllocator_h
#define ExecutableAllocator_h

#include <stddef.h> // for ptrdiff_t
#include <limits>
#include <wtf/Assertions.h>
#include <wtf/PassRefPtr.h>
#include <wtf/RefCounted.h>
#include <wtf/UnusedParam.h>
#include <wtf/Vector.h>

#if OS(IPHONE_OS)
#include <libkern/OSCacheControl.h>
#include <sys/mman.h>
#endif

#if OS(SYMBIAN)
#include <e32std.h>
#endif

#if CPU(MIPS) && OS(LINUX)
#include <sys/cachectl.h>
#endif

#if OS(WINCE)
// From pkfuncs.h (private header file from the Platform Builder)
#define CACHE_SYNC_ALL 0x07F
extern "C" __declspec(dllimport) void CacheRangeFlush(LPVOID pAddr, DWORD dwLength, DWORD dwFlags);
#endif

#define JIT_ALLOCATOR_PAGE_SIZE (ExecutableAllocator::pageSize)
#define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (ExecutableAllocator::pageSize * 4)

#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
#define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE)
#define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC)
#define INITIAL_PROTECTION_FLAGS PROTECTION_FLAGS_RX
#else
#define INITIAL_PROTECTION_FLAGS (PROT_READ | PROT_WRITE | PROT_EXEC)
#endif

namespace JSC {

inline size_t roundUpAllocationSize(size_t request, size_t granularity)
{
    if ((std::numeric_limits<size_t>::max() - granularity) <= request)
        CRASH(); // Allocation is too large

    // Round up to the next multiple of the allocation granularity.
    size_t size = request + (granularity - 1);
    size = size & ~(granularity - 1);
    ASSERT(size >= request);
    return size;
}
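
// For example, with a 4096-byte granularity a 1-byte request and a 4096-byte
// request both round up to 4096, and a 4097-byte request rounds up to 8192.
// The mask arithmetic above assumes the granularity is a power of two, which
// holds for both sizeof(void*) and hardware page sizes.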

}

#if ENABLE(ASSEMBLER)

namespace JSC {

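// ExecutablePool is a reference-counted bump allocator that hands out
// word-aligned chunks from page-aligned blocks of executable memory obtained
// via systemAlloc(). When the active block runs out, poolAllocate() appends a
// fresh system allocation; all blocks are released when the pool's last
// reference goes away.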
class ExecutablePool : public RefCounted<ExecutablePool> {
private:
    struct Allocation {
        char* pages;
        size_t size;
#if OS(SYMBIAN)
        RChunk* chunk;
#endif
    };
    typedef Vector<Allocation, 2> AllocationList;

public:
    static PassRefPtr<ExecutablePool> create(size_t n)
    {
        return adoptRef(new ExecutablePool(n));
    }

    void* alloc(size_t n)
    {
        ASSERT(m_freePtr <= m_end);

        // Round 'n' up to a multiple of the word size; if all allocations are
        // word-sized quantities, then all subsequent allocations will be aligned.
        n = roundUpAllocationSize(n, sizeof(void*));

        if (static_cast<ptrdiff_t>(n) < (m_end - m_freePtr)) {
            void* result = m_freePtr;
            m_freePtr += n;
            return result;
        }

        // There is insufficient space in the existing pool,
        // so we need to allocate from a new one.
        return poolAllocate(n);
    }

    ~ExecutablePool()
    {
        AllocationList::const_iterator end = m_pools.end();
        for (AllocationList::const_iterator ptr = m_pools.begin(); ptr != end; ++ptr)
            ExecutablePool::systemRelease(*ptr);
    }

    size_t available() const { return (m_pools.size() > 1) ? 0 : m_end - m_freePtr; }

private:
    static Allocation systemAlloc(size_t n);
    static void systemRelease(const Allocation& alloc);

    ExecutablePool(size_t n);

    void* poolAllocate(size_t n);

    char* m_freePtr;
    char* m_end;
    AllocationList m_pools;
};

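// ExecutableAllocator hands out ExecutablePools to JIT clients. Small requests
// share a single pool of JIT_ALLOCATOR_LARGE_ALLOC_SIZE bytes; requests larger
// than that each get a dedicated, unshared pool.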
class ExecutableAllocator {
    enum ProtectionSeting { Writable, Executable };

public:
    static size_t pageSize;
    ExecutableAllocator()
    {
        if (!pageSize)
            intializePageSize();
        if (isValid())
            m_smallAllocationPool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
#if !ENABLE(INTERPRETER)
        else
            CRASH();
#endif
    }

    bool isValid() const;

    PassRefPtr<ExecutablePool> poolForSize(size_t n)
    {
        // Try to fit in the existing small allocator
        ASSERT(m_smallAllocationPool);
        if (n < m_smallAllocationPool->available())
            return m_smallAllocationPool;

        // If the request is large, we just provide an unshared allocator
        if (n > JIT_ALLOCATOR_LARGE_ALLOC_SIZE)
            return ExecutablePool::create(n);

        // Create a new allocator
        RefPtr<ExecutablePool> pool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);

        // If the new allocator will result in more free space than the
        // current small allocator, then we will use it instead
        if ((pool->available() - n) > m_smallAllocationPool->available())
            m_smallAllocationPool = pool;
        return pool.release();
    }

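// With ASSEMBLER_WX_EXCLUSIVE, pool memory starts out read+execute
// (INITIAL_PROTECTION_FLAGS above), so callers flip a region to writable
// before patching code and back to executable before running it. Without
// that option, memory is mapped RWX and these calls are no-ops.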
#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
    static void makeWritable(void* start, size_t size)
    {
        reprotectRegion(start, size, Writable);
    }

    static void makeExecutable(void* start, size_t size)
    {
        reprotectRegion(start, size, Executable);
    }
#else
    static void makeWritable(void*, size_t) {}
    static void makeExecutable(void*, size_t) {}
#endif

#if CPU(X86) || CPU(X86_64)
    static void cacheFlush(void*, size_t)
    {
    }
#elif CPU(MIPS)
    static void cacheFlush(void* code, size_t size)
    {
#if COMPILER(GCC) && (GCC_VERSION >= 40300)
#if WTF_MIPS_ISA_REV(2) && (GCC_VERSION < 40403)
        int lineSize;
        asm("rdhwr %0, $1" : "=r" (lineSize));
        //
        // Modify "start" and "end" to avoid a GCC 4.3.0-4.4.2 bug in
        // mips_expand_synci_loop that may execute synci one more time.
        // "start" points to the first byte of the cache line.
        // "end" points to the last byte of the line before the last cache line.
        // Because size is always a multiple of 4, it is safe to set
        // "end" to the last byte.
        //
        intptr_t start = reinterpret_cast<intptr_t>(code) & (-lineSize);
        intptr_t end = ((reinterpret_cast<intptr_t>(code) + size - 1) & (-lineSize)) - 1;
        __builtin___clear_cache(reinterpret_cast<char*>(start), reinterpret_cast<char*>(end));
#else
        intptr_t end = reinterpret_cast<intptr_t>(code) + size;
        __builtin___clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(end));
#endif
#else
        _flush_cache(reinterpret_cast<char*>(code), size, BCACHE);
#endif
    }
#elif CPU(ARM_THUMB2) && OS(IPHONE_OS)
    static void cacheFlush(void* code, size_t size)
    {
        sys_dcache_flush(code, size);
        sys_icache_invalidate(code, size);
    }
#elif CPU(ARM_THUMB2) && OS(LINUX)
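    // This invokes the Linux cacheflush system call (__ARM_NR_cacheflush,
    // 0x0f0002, loaded into r7 via movw/movt) with r0 = start, r1 = end and
    // r2 = 0. r7 is saved and restored around the call because Thumb code
    // typically uses it as the frame pointer.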
    static void cacheFlush(void* code, size_t size)
    {
        asm volatile (
            "push {r7}\n"
            "mov r0, %0\n"
            "mov r1, %1\n"
            "movw r7, #0x2\n"
            "movt r7, #0xf\n"
            "movs r2, #0x0\n"
            "svc 0x0\n"
            "pop {r7}\n"
            :
            : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
            : "r0", "r1", "r2");
    }
#elif OS(SYMBIAN)
    static void cacheFlush(void* code, size_t size)
    {
        User::IMB_Range(code, static_cast<char*>(code) + size);
    }
#elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(RVCT)
    static __asm void cacheFlush(void* code, size_t size);
#elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(GCC)
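    // Same cacheflush system call as the Thumb-2 variant above; the syscall
    // number 0x0f0002 is built in r7 with mov/add because ARM-mode immediates
    // cannot encode it in a single mov.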
    static void cacheFlush(void* code, size_t size)
    {
        asm volatile (
            "push {r7}\n"
            "mov r0, %0\n"
            "mov r1, %1\n"
            "mov r7, #0xf0000\n"
            "add r7, r7, #0x2\n"
            "mov r2, #0x0\n"
            "svc 0x0\n"
            "pop {r7}\n"
            :
            : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
            : "r0", "r1", "r2");
    }
#elif OS(WINCE)
    static void cacheFlush(void* code, size_t size)
    {
        CacheRangeFlush(code, size, CACHE_SYNC_ALL);
    }
#else
    #error "cacheFlush support is missing on this platform."
#endif

private:

#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
    static void reprotectRegion(void*, size_t, ProtectionSeting);
#endif

    RefPtr<ExecutablePool> m_smallAllocationPool;
    static void intializePageSize();
};

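// A rough sketch of how a JIT client might drive these classes (hypothetical
// caller code and names, not part of this header's API surface):
//
//     ExecutableAllocator allocator;
//     RefPtr<ExecutablePool> pool = allocator.poolForSize(codeSize);
//     void* code = pool->alloc(codeSize);
//     ExecutableAllocator::makeWritable(code, codeSize);
//     memcpy(code, generatedCode, codeSize); // emit or copy instructions
//     ExecutableAllocator::cacheFlush(code, codeSize);
//     ExecutableAllocator::makeExecutable(code, codeSize);
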
inline ExecutablePool::ExecutablePool(size_t n)
{
    size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
    Allocation mem = systemAlloc(allocSize);
    m_pools.append(mem);
    m_freePtr = mem.pages;
    if (!m_freePtr)
        CRASH(); // Failed to allocate
    m_end = m_freePtr + allocSize;
}

inline void* ExecutablePool::poolAllocate(size_t n)
{
    size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);

    Allocation result = systemAlloc(allocSize);
    if (!result.pages)
        CRASH(); // Failed to allocate

    ASSERT(m_end >= m_freePtr);
    if ((allocSize - n) > static_cast<size_t>(m_end - m_freePtr)) {
        // The new allocation has more free space left over than the current
        // block, so make it the pool's active allocation block.
        m_freePtr = result.pages + n;
        m_end = result.pages + allocSize;
    }

    m_pools.append(result);
    return result.pages;
}

}

#endif // ENABLE(ASSEMBLER)

#endif // !defined(ExecutableAllocator_h)