/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef ExecutableAllocator_h
#define ExecutableAllocator_h

#include <stddef.h> // for ptrdiff_t
#include <limits>
#include <wtf/Assertions.h>
#include <wtf/PassRefPtr.h>
#include <wtf/RefCounted.h>
#include <wtf/UnusedParam.h>
#include <wtf/Vector.h>

#if OS(IPHONE_OS)
#include <libkern/OSCacheControl.h>
#include <sys/mman.h>
#endif

#if OS(SYMBIAN)
#include <e32std.h>
#endif

#if OS(WINCE)
// From pkfuncs.h (private header file from the Platform Builder)
#define CACHE_SYNC_ALL 0x07F
extern "C" __declspec(dllimport) void CacheRangeFlush(LPVOID pAddr, DWORD dwLength, DWORD dwFlags);
#endif

#define JIT_ALLOCATOR_PAGE_SIZE (ExecutableAllocator::pageSize)
#define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (ExecutableAllocator::pageSize * 4)

#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
#define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE)
#define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC)
#define INITIAL_PROTECTION_FLAGS PROTECTION_FLAGS_RX
#else
#define INITIAL_PROTECTION_FLAGS (PROT_READ | PROT_WRITE | PROT_EXEC)
#endif
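
// With ASSEMBLER_WX_EXCLUSIVE enabled, JIT memory is never writable and
// executable at the same time: it starts out read+execute
// (INITIAL_PROTECTION_FLAGS) and is temporarily flipped to read+write around
// each code write via ExecutableAllocator::makeWritable()/makeExecutable()
// below. Without it, pages simply stay read+write+execute for their lifetime.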
62 | ||
63 | namespace JSC { | |
64 | ||
inline size_t roundUpAllocationSize(size_t request, size_t granularity)
{
    if ((std::numeric_limits<size_t>::max() - granularity) <= request)
        CRASH(); // Allocation is too large

    // Round up to the next multiple of the granularity.
    size_t size = request + (granularity - 1);
    size = size & ~(granularity - 1);
    ASSERT(size >= request);
    return size;
}
76 | ||
77 | } | |
78 | ||
79 | #if ENABLE(ASSEMBLER) | |
80 | ||
9dae56ea A |
81 | namespace JSC { |
82 | ||
class ExecutablePool : public RefCounted<ExecutablePool> {
private:
    struct Allocation {
        char* pages;
        size_t size;
#if OS(SYMBIAN)
        RChunk* chunk;
#endif
    };
    typedef Vector<Allocation, 2> AllocationList;

public:
    static PassRefPtr<ExecutablePool> create(size_t n)
    {
        return adoptRef(new ExecutablePool(n));
    }

    void* alloc(size_t n)
    {
        ASSERT(m_freePtr <= m_end);

        // Round 'n' up to a multiple of the word size; if all allocations are
        // word-sized quantities, then all subsequent allocations will be aligned.
        n = roundUpAllocationSize(n, sizeof(void*));

        if (static_cast<ptrdiff_t>(n) < (m_end - m_freePtr)) {
            void* result = m_freePtr;
            m_freePtr += n;
            return result;
        }

        // Insufficient space to allocate in the existing pool,
        // so we need to allocate into a new pool.
        return poolAllocate(n);
    }

    ~ExecutablePool()
    {
        AllocationList::const_iterator end = m_pools.end();
        for (AllocationList::const_iterator ptr = m_pools.begin(); ptr != end; ++ptr)
            ExecutablePool::systemRelease(*ptr);
    }

    size_t available() const { return (m_pools.size() > 1) ? 0 : m_end - m_freePtr; }

private:
    static Allocation systemAlloc(size_t n);
    static void systemRelease(const Allocation& alloc);

    ExecutablePool(size_t n);

    void* poolAllocate(size_t n);

    char* m_freePtr;
    char* m_end;
    AllocationList m_pools;
};
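
// Illustrative usage sketch (commented out; the 'allocator' variable is
// assumed to be an ExecutableAllocator, declared below). A client grabs a
// pool and bump-allocates executable chunks from it; every underlying OS
// allocation is released when the last RefPtr to the pool goes away.
//
//     RefPtr<ExecutablePool> pool = allocator.poolForSize(64);
//     void* stub = pool->alloc(64);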
140 | ||
141 | class ExecutableAllocator { | |
ba379fdc A |
142 | enum ProtectionSeting { Writable, Executable }; |
143 | ||
9dae56ea A |
144 | public: |
145 | static size_t pageSize; | |
146 | ExecutableAllocator() | |
147 | { | |
148 | if (!pageSize) | |
149 | intializePageSize(); | |
150 | m_smallAllocationPool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE); | |
151 | } | |
152 | ||
    PassRefPtr<ExecutablePool> poolForSize(size_t n)
    {
        // Try to fit in the existing small allocator.
        if (n < m_smallAllocationPool->available())
            return m_smallAllocationPool;

        // If the request is large, we just provide an unshared allocator.
        if (n > JIT_ALLOCATOR_LARGE_ALLOC_SIZE)
            return ExecutablePool::create(n);

        // Create a new allocator.
        RefPtr<ExecutablePool> pool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);

        // If the new allocator will result in more free space than the current
        // small allocator, then we will use it instead.
        if ((pool->available() - n) > m_smallAllocationPool->available())
            m_smallAllocationPool = pool;
        return pool.release();
    }
172 | ||
ba379fdc A |
173 | #if ENABLE(ASSEMBLER_WX_EXCLUSIVE) |
174 | static void makeWritable(void* start, size_t size) | |
175 | { | |
176 | reprotectRegion(start, size, Writable); | |
177 | } | |
178 | ||
179 | static void makeExecutable(void* start, size_t size) | |
180 | { | |
181 | reprotectRegion(start, size, Executable); | |
182 | } | |
183 | #else | |
184 | static void makeWritable(void*, size_t) {} | |
185 | static void makeExecutable(void*, size_t) {} | |
186 | #endif | |
187 | ||
188 | ||
#if CPU(X86) || CPU(X86_64)
    static void cacheFlush(void*, size_t)
    {
    }
#elif CPU(ARM_THUMB2) && OS(IPHONE_OS)
    static void cacheFlush(void* code, size_t size)
    {
        sys_dcache_flush(code, size);
        sys_icache_invalidate(code, size);
    }
#elif CPU(ARM_THUMB2) && OS(LINUX)
    static void cacheFlush(void* code, size_t size)
    {
        asm volatile (
            "push    {r7}\n"
            "mov     r0, %0\n"      // start of range
            "mov     r1, %1\n"      // end of range
            "movw    r7, #0x2\n"    // r7 = 0x000f0002 (__ARM_NR_cacheflush)
            "movt    r7, #0xf\n"
            "movs    r2, #0x0\n"    // flags = 0
            "svc     0x0\n"
            "pop     {r7}\n"
            :
            : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
            : "r0", "r1", "r2");
    }
#elif OS(SYMBIAN)
    static void cacheFlush(void* code, size_t size)
    {
        User::IMB_Range(code, static_cast<char*>(code) + size);
    }
#elif CPU(ARM_TRADITIONAL) && OS(LINUX)
    static void cacheFlush(void* code, size_t size)
    {
        asm volatile (
            "push    {r7}\n"
            "mov     r0, %0\n"          // start of range
            "mov     r1, %1\n"          // end of range
            "mov     r7, #0xf0000\n"    // r7 = 0x000f0002 (__ARM_NR_cacheflush)
            "add     r7, r7, #0x2\n"
            "mov     r2, #0x0\n"        // flags = 0
            "svc     0x0\n"
            "pop     {r7}\n"
            :
            : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
            : "r0", "r1", "r2");
    }
#elif OS(WINCE)
    static void cacheFlush(void* code, size_t size)
    {
        CacheRangeFlush(code, size, CACHE_SYNC_ALL);
    }
#else
#error "cacheFlush support is missing on this platform."
#endif
244 | ||
9dae56ea | 245 | private: |
ba379fdc A |
246 | |
247 | #if ENABLE(ASSEMBLER_WX_EXCLUSIVE) | |
248 | static void reprotectRegion(void*, size_t, ProtectionSeting); | |
249 | #endif | |
250 | ||
9dae56ea A |
251 | RefPtr<ExecutablePool> m_smallAllocationPool; |
252 | static void intializePageSize(); | |
253 | }; | |
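
// Typical flow for a JIT client of this header (illustrative sketch only; the
// variable names are made up):
//
//     ExecutableAllocator allocator;
//     RefPtr<ExecutablePool> pool = allocator.poolForSize(codeSize);
//     void* code = pool->alloc(codeSize);
//     ExecutableAllocator::makeWritable(code, codeSize);
//     // ... copy or emit machine code into 'code' ...
//     ExecutableAllocator::makeExecutable(code, codeSize);
//     ExecutableAllocator::cacheFlush(code, codeSize);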
254 | ||
255 | inline ExecutablePool::ExecutablePool(size_t n) | |
256 | { | |
257 | size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE); | |
258 | Allocation mem = systemAlloc(allocSize); | |
259 | m_pools.append(mem); | |
260 | m_freePtr = mem.pages; | |
261 | if (!m_freePtr) | |
262 | CRASH(); // Failed to allocate | |
263 | m_end = m_freePtr + allocSize; | |
264 | } | |
265 | ||
266 | inline void* ExecutablePool::poolAllocate(size_t n) | |
267 | { | |
268 | size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE); | |
269 | ||
270 | Allocation result = systemAlloc(allocSize); | |
271 | if (!result.pages) | |
272 | CRASH(); // Failed to allocate | |
273 | ||
274 | ASSERT(m_end >= m_freePtr); | |
275 | if ((allocSize - n) > static_cast<size_t>(m_end - m_freePtr)) { | |
276 | // Replace allocation pool | |
277 | m_freePtr = result.pages + n; | |
278 | m_end = result.pages + allocSize; | |
279 | } | |
280 | ||
281 | m_pools.append(result); | |
282 | return result.pages; | |
283 | } | |
284 | ||
285 | } | |
f9bf01c6 | 286 | |
9dae56ea A |
287 | #endif // ENABLE(ASSEMBLER) |
288 | ||
289 | #endif // !defined(ExecutableAllocator) |