/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef ExecutableAllocator_h
#define ExecutableAllocator_h

#include <stddef.h> // for ptrdiff_t
#include <limits>
#include <wtf/Assertions.h>
#include <wtf/PassRefPtr.h>
#include <wtf/RefCounted.h>
#include <wtf/UnusedParam.h>
#include <wtf/Vector.h>

#if OS(IPHONE_OS)
#include <libkern/OSCacheControl.h>
#include <sys/mman.h>
#endif

#if OS(SYMBIAN)
#include <e32std.h>
#endif

#if CPU(MIPS) && OS(LINUX)
#include <sys/cachectl.h>
#endif

#if OS(WINCE)
// From pkfuncs.h (private header file from the Platform Builder)
#define CACHE_SYNC_ALL 0x07F
extern "C" __declspec(dllimport) void CacheRangeFlush(LPVOID pAddr, DWORD dwLength, DWORD dwFlags);
#endif

#define JIT_ALLOCATOR_PAGE_SIZE (ExecutableAllocator::pageSize)
#define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (ExecutableAllocator::pageSize * 4)
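// Illustrative note: a shared pool chunk is four pages, so on a typical
// 4 KiB-page system JIT_ALLOCATOR_LARGE_ALLOC_SIZE works out to 16 KiB;
// requests larger than this get a private, unshared pool (see poolForSize).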

#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
#define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE)
#define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC)
#define INITIAL_PROTECTION_FLAGS PROTECTION_FLAGS_RX
#else
#define INITIAL_PROTECTION_FLAGS (PROT_READ | PROT_WRITE | PROT_EXEC)
#endif

namespace JSC {

inline size_t roundUpAllocationSize(size_t request, size_t granularity)
{
    if ((std::numeric_limits<size_t>::max() - granularity) <= request)
        CRASH(); // Allocation is too large

    // Round up to the next multiple of the granularity (which must be a
    // power of two for the mask arithmetic below to be correct).
    size_t size = request + (granularity - 1);
    size = size & ~(granularity - 1);
    ASSERT(size >= request);
    return size;
}
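
// A worked illustration of the rounding (the values here are only examples;
// callers in this file pass either sizeof(void*) or the page size):
//   roundUpAllocationSize(1, 4096)    == 4096
//   roundUpAllocationSize(4096, 4096) == 4096
//   roundUpAllocationSize(4097, 4096) == 8192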

}

#if ENABLE(ASSEMBLER)

namespace JSC {

class ExecutablePool : public RefCounted<ExecutablePool> {
private:
    struct Allocation {
        char* pages;
        size_t size;
#if OS(SYMBIAN)
        RChunk* chunk;
#endif
    };
    typedef Vector<Allocation, 2> AllocationList;

public:
    static PassRefPtr<ExecutablePool> create(size_t n)
    {
        return adoptRef(new ExecutablePool(n));
    }

    void* alloc(size_t n)
    {
        ASSERT(m_freePtr <= m_end);

        // Round 'n' up to a multiple of the word size; if all allocations are
        // word-sized quantities, then all subsequent allocations will be aligned.
        n = roundUpAllocationSize(n, sizeof(void*));

        if (static_cast<ptrdiff_t>(n) < (m_end - m_freePtr)) {
            void* result = m_freePtr;
            m_freePtr += n;
            return result;
        }

        // Insufficient space to allocate in the existing pool,
        // so we need to allocate in a new pool.
        return poolAllocate(n);
    }

    // Shrink the most recent allocation in place. This only has an effect when
    // 'allocation' ends exactly at the current bump pointer; for any older
    // allocation the request is silently ignored.
    void tryShrink(void* allocation, size_t oldSize, size_t newSize)
    {
        if (static_cast<char*>(allocation) + oldSize != m_freePtr)
            return;
        m_freePtr = static_cast<char*>(allocation) + roundUpAllocationSize(newSize, sizeof(void*));
    }

    ~ExecutablePool()
    {
        AllocationList::const_iterator end = m_pools.end();
        for (AllocationList::const_iterator ptr = m_pools.begin(); ptr != end; ++ptr)
            ExecutablePool::systemRelease(*ptr);
    }

    size_t available() const { return (m_pools.size() > 1) ? 0 : m_end - m_freePtr; }

    static bool underMemoryPressure();

private:
    static Allocation systemAlloc(size_t n);
    static void systemRelease(const Allocation& alloc);

    ExecutablePool(size_t n);

    void* poolAllocate(size_t n);

    char* m_freePtr;
    char* m_end;
    AllocationList m_pools;
};
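
// A minimal usage sketch (the sizes here are hypothetical): an ExecutablePool
// is a refcounted bump allocator over one or more executable memory chunks.
//
//   RefPtr<ExecutablePool> pool = ExecutablePool::create(16384);
//   void* code = pool->alloc(128); // bumps m_freePtr by 128 (already word-aligned)
//
// Allocations are never freed individually; the underlying chunks are released
// together when the last RefPtr to the pool drops away.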

class ExecutableAllocator {
    enum ProtectionSetting { Writable, Executable };

public:
    static size_t pageSize;
    ExecutableAllocator()
    {
        if (!pageSize)
            initializePageSize();
        if (isValid())
            m_smallAllocationPool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
#if !ENABLE(INTERPRETER)
        else
            CRASH();
#endif
    }

    bool isValid() const;

    PassRefPtr<ExecutablePool> poolForSize(size_t n)
    {
        // Try to fit in the existing small allocator
        ASSERT(m_smallAllocationPool);
        if (n < m_smallAllocationPool->available())
            return m_smallAllocationPool;

        // If the request is large, we just provide an unshared allocator
        if (n > JIT_ALLOCATOR_LARGE_ALLOC_SIZE)
            return ExecutablePool::create(n);

        // Create a new allocator
        RefPtr<ExecutablePool> pool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);

        // If the new allocator will result in more free space than the
        // current small allocator, then we will use it instead
        if ((pool->available() - n) > m_smallAllocationPool->available())
            m_smallAllocationPool = pool;
        return pool.release();
    }
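
    // Illustrative walk-through of the heuristic above (assuming 4 KiB pages,
    // so the large-alloc threshold is 16384 bytes; request sizes are examples):
    //   n = 100,   small pool has 4000 free -> reuse the shared small pool
    //   n = 20000                           -> private pool, never shared
    //   n = 8000,  small pool has 4000 free -> new 16 KiB pool; 16384 - 8000
    //              = 8384 > 4000, so it also becomes the new small pool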

#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
    static void makeWritable(void* start, size_t size)
    {
        reprotectRegion(start, size, Writable);
    }

    static void makeExecutable(void* start, size_t size)
    {
        reprotectRegion(start, size, Executable);
    }
#else
    static void makeWritable(void*, size_t) {}
    static void makeExecutable(void*, size_t) {}
#endif
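
    // When ASSEMBLER_WX_EXCLUSIVE is enabled, a region is never writable and
    // executable at the same time, so a hypothetical caller patching JIT code
    // would bracket the write and then flush, along these lines:
    //
    //   ExecutableAllocator::makeWritable(code, size);   // RW: safe to patch
    //   /* ...emit or patch instructions... */
    //   ExecutableAllocator::makeExecutable(code, size); // RX: safe to run
    //   ExecutableAllocator::cacheFlush(code, size);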


#if CPU(X86) || CPU(X86_64)
    static void cacheFlush(void*, size_t)
    {
    }
#elif CPU(MIPS)
    static void cacheFlush(void* code, size_t size)
    {
#if COMPILER(GCC) && (GCC_VERSION >= 40300)
#if WTF_MIPS_ISA_REV(2) && (GCC_VERSION < 40403)
        int lineSize;
        asm("rdhwr %0, $1" : "=r" (lineSize));
        //
        // Modify "start" and "end" to avoid a GCC 4.3.0-4.4.2 bug in
        // mips_expand_synci_loop that may execute synci one extra time.
        // "start" points to the first byte of the cache line.
        // "end" points to the last byte of the line before the last cache
        // line. Because size is always a multiple of 4, it is safe to set
        // "end" to the last byte.
        //
        intptr_t start = reinterpret_cast<intptr_t>(code) & (-lineSize);
        intptr_t end = ((reinterpret_cast<intptr_t>(code) + size - 1) & (-lineSize)) - 1;
        __builtin___clear_cache(reinterpret_cast<char*>(start), reinterpret_cast<char*>(end));
#else
        intptr_t end = reinterpret_cast<intptr_t>(code) + size;
        __builtin___clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(end));
#endif
#else
        _flush_cache(reinterpret_cast<char*>(code), size, BCACHE);
#endif
    }
#elif CPU(ARM_THUMB2) && OS(IPHONE_OS)
    static void cacheFlush(void* code, size_t size)
    {
        sys_dcache_flush(code, size);
        sys_icache_invalidate(code, size);
    }
#elif CPU(ARM_THUMB2) && OS(LINUX)
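    // The inline assembly below issues the ARM Linux cacheflush system call
    // directly: r0/r1 receive the start/end of the range, r2 the flags (0),
    // and r7 the syscall number. movw/movt compose r7 = 0x0f0002, which is
    // __ARM_NR_cacheflush (__ARM_NR_BASE + 2). r7 is saved and restored
    // because Thumb-2 code uses it as the frame pointer.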
    static void cacheFlush(void* code, size_t size)
    {
        asm volatile (
            "push {r7}\n"
            "mov r0, %0\n"
            "mov r1, %1\n"
            "movw r7, #0x2\n"
            "movt r7, #0xf\n"
            "movs r2, #0x0\n"
            "svc 0x0\n"
            "pop {r7}\n"
            :
            : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
            : "r0", "r1", "r2");
    }
#elif OS(SYMBIAN)
    static void cacheFlush(void* code, size_t size)
    {
        User::IMB_Range(code, static_cast<char*>(code) + size);
    }
#elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(RVCT)
    static __asm void cacheFlush(void* code, size_t size);
#elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(GCC)
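    // Same cacheflush syscall as the Thumb-2 variant above, but classic ARM
    // mode here cannot assume movw/movt (ARMv6T2+), so r7 = 0xf0002 is built
    // with a mov plus an add of immediates that fit ARM's rotated-immediate
    // encoding.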
    static void cacheFlush(void* code, size_t size)
    {
        asm volatile (
            "push {r7}\n"
            "mov r0, %0\n"
            "mov r1, %1\n"
            "mov r7, #0xf0000\n"
            "add r7, r7, #0x2\n"
            "mov r2, #0x0\n"
            "svc 0x0\n"
            "pop {r7}\n"
            :
            : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
            : "r0", "r1", "r2");
    }
#elif OS(WINCE)
    static void cacheFlush(void* code, size_t size)
    {
        CacheRangeFlush(code, size, CACHE_SYNC_ALL);
    }
#else
#error "Cache-flush support is missing on this platform."
#endif

private:

#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
    static void reprotectRegion(void*, size_t, ProtectionSetting);
#endif

    RefPtr<ExecutablePool> m_smallAllocationPool;
    static void initializePageSize();
};

inline ExecutablePool::ExecutablePool(size_t n)
{
    size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
    Allocation mem = systemAlloc(allocSize);
    m_pools.append(mem);
    m_freePtr = mem.pages;
    if (!m_freePtr)
        CRASH(); // Failed to allocate
    m_end = m_freePtr + allocSize;
}

inline void* ExecutablePool::poolAllocate(size_t n)
{
    size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);

    Allocation result = systemAlloc(allocSize);
    if (!result.pages)
        CRASH(); // Failed to allocate

    ASSERT(m_end >= m_freePtr);
    if ((allocSize - n) > static_cast<size_t>(m_end - m_freePtr)) {
        // Replace allocation pool
        m_freePtr = result.pages + n;
        m_end = result.pages + allocSize;
    }

    m_pools.append(result);
    return result.pages;
}
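
// Worked example of the replacement heuristic above (a 4096-byte page size is
// assumed for illustration): if a 100-byte request arrives when the current
// bump region has only 50 bytes left, systemAlloc returns a fresh 4096-byte
// chunk; since 4096 - 100 > 50, the tail of the new chunk becomes the pool's
// bump region. The old 50-byte remnant is no longer allocated from, but its
// chunk remains in m_pools and is released with the rest at destruction.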

}

#endif // ENABLE(ASSEMBLER)

#endif // !defined(ExecutableAllocator_h)