/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef ExecutableAllocator_h
#define ExecutableAllocator_h

#include <stddef.h> // for ptrdiff_t
#include <limits> // for std::numeric_limits, used by roundUpAllocationSize
#include <wtf/Assertions.h>
#include <wtf/PassRefPtr.h>
#include <wtf/RefCounted.h>
#include <wtf/UnusedParam.h>
#include <wtf/Vector.h>

#if OS(IPHONE_OS)
#include <libkern/OSCacheControl.h>
#include <sys/mman.h>
#endif

#if OS(SYMBIAN)
#include <e32std.h>
#endif

#if OS(WINCE)
// From pkfuncs.h (private header file from the Platform Builder)
#define CACHE_SYNC_ALL 0x07F
extern "C" __declspec(dllimport) void CacheRangeFlush(LPVOID pAddr, DWORD dwLength, DWORD dwFlags);
#endif

#define JIT_ALLOCATOR_PAGE_SIZE (ExecutableAllocator::pageSize)
#define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (ExecutableAllocator::pageSize * 4)

#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
#define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE)
#define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC)
#define INITIAL_PROTECTION_FLAGS PROTECTION_FLAGS_RX
#else
#define INITIAL_PROTECTION_FLAGS (PROT_READ | PROT_WRITE | PROT_EXEC)
#endif
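
// Editorial note: under ASSEMBLER_WX_EXCLUSIVE, JIT pages start out
// read+execute and are flipped to read+write only while code is being
// emitted (see makeWritable/makeExecutable below); otherwise pages stay
// readable, writable, and executable for their whole lifetime and no
// reprotection is ever needed.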

namespace JSC {

inline size_t roundUpAllocationSize(size_t request, size_t granularity)
{
    if ((std::numeric_limits<size_t>::max() - granularity) <= request)
        CRASH(); // Allocation is too large

    // Round up to next page boundary
    size_t size = request + (granularity - 1);
    size = size & ~(granularity - 1);
    ASSERT(size >= request);
    return size;
}
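
// Editorial note: the rounding above works because 'granularity' is always a
// power of two here (a page size or sizeof(void*)). A quick worked example:
// roundUpAllocationSize(10, 8) computes size = 10 + 7 = 17, then
// 17 & ~7 = 16, the next multiple of 8.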

}

#if ENABLE(ASSEMBLER)

namespace JSC {

class ExecutablePool : public RefCounted<ExecutablePool> {
private:
    struct Allocation {
        char* pages;
        size_t size;
#if OS(SYMBIAN)
        RChunk* chunk;
#endif
    };
    typedef Vector<Allocation, 2> AllocationList;

public:
    static PassRefPtr<ExecutablePool> create(size_t n)
    {
        return adoptRef(new ExecutablePool(n));
    }

    void* alloc(size_t n)
    {
        ASSERT(m_freePtr <= m_end);

        // Round 'n' up to a multiple of word size; if all allocations are of
        // word sized quantities, then all subsequent allocations will be aligned.
        n = roundUpAllocationSize(n, sizeof(void*));

        if (static_cast<ptrdiff_t>(n) < (m_end - m_freePtr)) {
            void* result = m_freePtr;
            m_freePtr += n;
            return result;
        }

        // Insufficient space to allocate in the existing pool,
        // so we need to allocate into a new pool.
        return poolAllocate(n);
    }
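
    // Editorial sketch (not in the original header): alloc() is a simple bump
    // allocator. On a 64-bit target, for example, alloc(13) is rounded up to
    // 16 bytes, m_freePtr advances by 16, and the returned block stays
    // pointer-aligned for the next caller. Nothing is freed individually;
    // memory is reclaimed only when the whole pool is destroyed.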

    ~ExecutablePool()
    {
        AllocationList::const_iterator end = m_pools.end();
        for (AllocationList::const_iterator ptr = m_pools.begin(); ptr != end; ++ptr)
            ExecutablePool::systemRelease(*ptr);
    }

    size_t available() const { return (m_pools.size() > 1) ? 0 : m_end - m_freePtr; }

private:
    static Allocation systemAlloc(size_t n);
    static void systemRelease(const Allocation& alloc);

    ExecutablePool(size_t n);

    void* poolAllocate(size_t n);

    char* m_freePtr;
    char* m_end;
    AllocationList m_pools;
};
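
// Editorial note: an ExecutablePool owns one or more page-aligned Allocations
// and hands out word-aligned slices of them. available() deliberately reports
// zero once a pool holds more than one allocation, so ExecutableAllocator
// below stops treating it as the shared small-allocation pool.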

class ExecutableAllocator {
    enum ProtectionSetting { Writable, Executable };

public:
    static size_t pageSize;
    ExecutableAllocator()
    {
        if (!pageSize)
            initializePageSize();
        m_smallAllocationPool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
    }

    PassRefPtr<ExecutablePool> poolForSize(size_t n)
    {
        // Try to fit in the existing small allocator.
        if (n < m_smallAllocationPool->available())
            return m_smallAllocationPool;

        // If the request is large, we just provide an unshared allocator.
        if (n > JIT_ALLOCATOR_LARGE_ALLOC_SIZE)
            return ExecutablePool::create(n);

        // Create a new allocator.
        RefPtr<ExecutablePool> pool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);

        // If the new allocator will result in more free space than in
        // the current small allocator, then we will use it instead.
        if ((pool->available() - n) > m_smallAllocationPool->available())
            m_smallAllocationPool = pool;
        return pool.release();
    }
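
    // Usage sketch (editorial, illustrative only -- the caller shown here is
    // hypothetical): a JIT about to emit roughly 200 bytes of code would do
    //
    //     RefPtr<ExecutablePool> pool = allocator.poolForSize(200);
    //     void* code = pool->alloc(200);
    //
    // Small requests share m_smallAllocationPool; anything larger than
    // JIT_ALLOCATOR_LARGE_ALLOC_SIZE gets its own dedicated, unshared pool.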

#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
    static void makeWritable(void* start, size_t size)
    {
        reprotectRegion(start, size, Writable);
    }

    static void makeExecutable(void* start, size_t size)
    {
        reprotectRegion(start, size, Executable);
    }
#else
    static void makeWritable(void*, size_t) {}
    static void makeExecutable(void*, size_t) {}
#endif
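
    // Editorial note: with ASSEMBLER_WX_EXCLUSIVE enabled, pages are never
    // writable and executable at once, so callers are expected to bracket any
    // patching of JIT code, for example (illustrative only):
    //
    //     ExecutableAllocator::makeWritable(code, size);
    //     // ... rewrite instructions ...
    //     ExecutableAllocator::makeExecutable(code, size);
    //     ExecutableAllocator::cacheFlush(code, size);
    //
    // When the option is off, the empty stubs above make these calls free.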

#if CPU(X86) || CPU(X86_64)
    static void cacheFlush(void*, size_t)
    {
    }
#elif CPU(ARM_THUMB2) && OS(IPHONE_OS)
    static void cacheFlush(void* code, size_t size)
    {
        sys_dcache_flush(code, size);
        sys_icache_invalidate(code, size);
    }
#elif CPU(ARM_THUMB2) && OS(LINUX)
    static void cacheFlush(void* code, size_t size)
    {
        asm volatile (
            "push    {r7}\n"
            "mov     r0, %0\n"
            "mov     r1, %1\n"
            "movw    r7, #0x2\n"
            "movt    r7, #0xf\n"
            "movs    r2, #0x0\n"
            "svc     0x0\n"
            "pop     {r7}\n"
            :
            : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
            : "r0", "r1", "r2");
    }
#elif OS(SYMBIAN)
    static void cacheFlush(void* code, size_t size)
    {
        User::IMB_Range(code, static_cast<char*>(code) + size);
    }
#elif CPU(ARM_TRADITIONAL) && OS(LINUX)
    static void cacheFlush(void* code, size_t size)
    {
        asm volatile (
            "push    {r7}\n"
            "mov     r0, %0\n"
            "mov     r1, %1\n"
            "mov     r7, #0xf0000\n"
            "add     r7, r7, #0x2\n"
            "mov     r2, #0x0\n"
            "svc     0x0\n"
            "pop     {r7}\n"
            :
            : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
            : "r0", "r1", "r2");
    }
#elif OS(WINCE)
    static void cacheFlush(void* code, size_t size)
    {
        CacheRangeFlush(code, size, CACHE_SYNC_ALL);
    }
#else
    #error "cacheFlush support is missing on this platform."
#endif
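
    // Editorial note: on x86/x86_64 cacheFlush() is deliberately empty because
    // those CPUs keep the instruction cache coherent with stores from the same
    // core. ARM cores do not, so freshly written code must be flushed from the
    // data cache and invalidated in the instruction cache (here via the Linux
    // cacheflush syscall, sys_dcache_flush/sys_icache_invalidate on iPhone OS,
    // User::IMB_Range on Symbian, or CacheRangeFlush on WinCE) before it may
    // safely be executed.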

private:

#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
    static void reprotectRegion(void*, size_t, ProtectionSetting);
#endif

    RefPtr<ExecutablePool> m_smallAllocationPool;
    static void initializePageSize();
};

inline ExecutablePool::ExecutablePool(size_t n)
{
    size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
    Allocation mem = systemAlloc(allocSize);
    m_pools.append(mem);
    m_freePtr = mem.pages;
    if (!m_freePtr)
        CRASH(); // Failed to allocate
    m_end = m_freePtr + allocSize;
}

inline void* ExecutablePool::poolAllocate(size_t n)
{
    size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);

    Allocation result = systemAlloc(allocSize);
    if (!result.pages)
        CRASH(); // Failed to allocate

    ASSERT(m_end >= m_freePtr);
    if ((allocSize - n) > static_cast<size_t>(m_end - m_freePtr)) {
        // Replace allocation pool
        m_freePtr = result.pages + n;
        m_end = result.pages + allocSize;
    }

    m_pools.append(result);
    return result.pages;
}
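
// Editorial note: poolAllocate() keeps whichever region has more free space.
// If the freshly allocated pages would leave more room after carving out 'n'
// bytes than the old pool still has, the bump pointers are repointed at the
// new allocation; either way the new pages are tracked in m_pools so the
// destructor releases them.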

}

#endif // ENABLE(ASSEMBLER)

#endif // !defined(ExecutableAllocator_h)