]> git.saurik.com Git - apple/javascriptcore.git/blob - jit/ExecutableAllocator.h
5223bf38fcd7f7f5aa1e126e52783afe08ab4b94
[apple/javascriptcore.git] / jit / ExecutableAllocator.h
1 /*
2 * Copyright (C) 2008 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 #ifndef ExecutableAllocator_h
27 #define ExecutableAllocator_h
28
29 #include <limits>
30 #include <wtf/Assertions.h>
31 #include <wtf/PassRefPtr.h>
32 #include <wtf/RefCounted.h>
33 #include <wtf/UnusedParam.h>
34 #include <wtf/Vector.h>
35
36 #include <libkern/OSCacheControl.h>
37 #include <sys/mman.h>
38
39 #define JIT_ALLOCATOR_PAGE_SIZE (ExecutableAllocator::pageSize)
40 #define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (ExecutableAllocator::pageSize * 4)
41
42 #if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
43 #define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE)
44 #define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC)
45 #define INITIAL_PROTECTION_FLAGS PROTECTION_FLAGS_RX
46 #else
47 #define INITIAL_PROTECTION_FLAGS (PROT_READ | PROT_WRITE | PROT_EXEC)
48 #endif
49
50 namespace JSC {
51
52 inline size_t roundUpAllocationSize(size_t request, size_t granularity)
53 {
54 if ((std::numeric_limits<size_t>::max() - granularity) <= request)
55 CRASH(); // Allocation is too large
56
57 // Round up to next page boundary
58 size_t size = request + (granularity - 1);
59 size = size & ~(granularity - 1);
60 ASSERT(size >= request);
61 return size;
62 }
63
64 }
65
66 #if ENABLE(ASSEMBLER)
67
68 namespace JSC {
69
70 class ExecutablePool : public RefCounted<ExecutablePool> {
71 private:
72 struct Allocation {
73 char* pages;
74 size_t size;
75 };
76 typedef Vector<Allocation, 2> AllocationList;
77
78 public:
79 static PassRefPtr<ExecutablePool> create(size_t n)
80 {
81 return adoptRef(new ExecutablePool(n));
82 }
83
84 void* alloc(size_t n)
85 {
86 ASSERT(m_freePtr <= m_end);
87
88 // Round 'n' up to a multiple of word size; if all allocations are of
89 // word sized quantities, then all subsequent allocations will be aligned.
90 n = roundUpAllocationSize(n, sizeof(void*));
91
92 if (static_cast<ptrdiff_t>(n) < (m_end - m_freePtr)) {
93 void* result = m_freePtr;
94 m_freePtr += n;
95 return result;
96 }
97
98 // Insufficient space to allocate in the existing pool
99 // so we need allocate into a new pool
100 return poolAllocate(n);
101 }
102
103 ~ExecutablePool()
104 {
105 AllocationList::const_iterator end = m_pools.end();
106 for (AllocationList::const_iterator ptr = m_pools.begin(); ptr != end; ++ptr)
107 ExecutablePool::systemRelease(*ptr);
108 }
109
110 size_t available() const { return (m_pools.size() > 1) ? 0 : m_end - m_freePtr; }
111
112 private:
113 static Allocation systemAlloc(size_t n);
114 static void systemRelease(const Allocation& alloc);
115
116 ExecutablePool(size_t n);
117
118 void* poolAllocate(size_t n);
119
120 char* m_freePtr;
121 char* m_end;
122 AllocationList m_pools;
123 };
124
125 class ExecutableAllocator {
126 enum ProtectionSeting { Writable, Executable };
127
128 public:
129 static size_t pageSize;
130 ExecutableAllocator()
131 {
132 if (!pageSize)
133 intializePageSize();
134 m_smallAllocationPool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
135 }
136
137 PassRefPtr<ExecutablePool> poolForSize(size_t n)
138 {
139 // Try to fit in the existing small allocator
140 if (n < m_smallAllocationPool->available())
141 return m_smallAllocationPool;
142
143 // If the request is large, we just provide a unshared allocator
144 if (n > JIT_ALLOCATOR_LARGE_ALLOC_SIZE)
145 return ExecutablePool::create(n);
146
147 // Create a new allocator
148 RefPtr<ExecutablePool> pool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
149
150 // If the new allocator will result in more free space than in
151 // the current small allocator, then we will use it instead
152 if ((pool->available() - n) > m_smallAllocationPool->available())
153 m_smallAllocationPool = pool;
154 return pool.release();
155 }
156
157 #if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
158 static void makeWritable(void* start, size_t size)
159 {
160 reprotectRegion(start, size, Writable);
161 }
162
163 static void makeExecutable(void* start, size_t size)
164 {
165 reprotectRegion(start, size, Executable);
166 }
167 #else
168 static void makeWritable(void*, size_t) {}
169 static void makeExecutable(void*, size_t) {}
170 #endif
171
172
173 #if PLATFORM(X86) || PLATFORM(X86_64)
174 static void cacheFlush(void*, size_t)
175 {
176 }
177 #elif PLATFORM_ARM_ARCH(7) && PLATFORM(IPHONE)
178 static void cacheFlush(void* code, size_t size)
179 {
180 sys_dcache_flush(code, size);
181 sys_icache_invalidate(code, size);
182 }
183 #else
184 #error "ExecutableAllocator::cacheFlush not implemented on this platform."
185 #endif
186
187 private:
188
189 #if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
190 static void reprotectRegion(void*, size_t, ProtectionSeting);
191 #endif
192
193 RefPtr<ExecutablePool> m_smallAllocationPool;
194 static void intializePageSize();
195 };
196
197 inline ExecutablePool::ExecutablePool(size_t n)
198 {
199 size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
200 Allocation mem = systemAlloc(allocSize);
201 m_pools.append(mem);
202 m_freePtr = mem.pages;
203 if (!m_freePtr)
204 CRASH(); // Failed to allocate
205 m_end = m_freePtr + allocSize;
206 }
207
208 inline void* ExecutablePool::poolAllocate(size_t n)
209 {
210 size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
211
212 Allocation result = systemAlloc(allocSize);
213 if (!result.pages)
214 CRASH(); // Failed to allocate
215
216 ASSERT(m_end >= m_freePtr);
217 if ((allocSize - n) > static_cast<size_t>(m_end - m_freePtr)) {
218 // Replace allocation pool
219 m_freePtr = result.pages + n;
220 m_end = result.pages + allocSize;
221 }
222
223 m_pools.append(result);
224 return result.pages;
225 }
226
227 }
228 #endif // ENABLE(ASSEMBLER)
229
#endif // !defined(ExecutableAllocator_h)