/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef ExecutableAllocator_h
#define ExecutableAllocator_h

#include <stddef.h> // for ptrdiff_t
#include <limits>   // for std::numeric_limits, used by roundUpAllocationSize
#include <wtf/Assertions.h>
#include <wtf/PageAllocation.h>
#include <wtf/PassRefPtr.h>
#include <wtf/RefCounted.h>
#include <wtf/UnusedParam.h>
#include <wtf/Vector.h>
#if OS(IOS)
#include <libkern/OSCacheControl.h>
#endif

#if OS(SYMBIAN)
#include <e32std.h> // for User::IMB_Range()
#endif
#if CPU(MIPS) && OS(LINUX)
#include <sys/cachectl.h>
#endif
#if CPU(SH4) && OS(LINUX)
#include <asm/cachectl.h>
#include <asm/unistd.h>
#include <sys/syscall.h>
#include <unistd.h> // for syscall()
#endif
#if OS(WINCE)
// From pkfuncs.h (private header file from the Platform Builder)
#define CACHE_SYNC_ALL 0x07F
extern "C" __declspec(dllimport) void CacheRangeFlush(LPVOID pAddr, DWORD dwLength, DWORD dwFlags);
#endif
#if PLATFORM(BREWMP)
#include <AEEIMemCache1.h>
#include <AEEMemCache1.bid>
#include <wtf/brew/RefPtrBrew.h>
#endif
#define JIT_ALLOCATOR_PAGE_SIZE (ExecutableAllocator::pageSize)
#define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (ExecutableAllocator::pageSize * 4)
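
// Pool sizing: executable memory is carved out in page-size units; the
// shared "small allocation" pool spans four pages, and poolForSize() gives
// anything larger than that a pool of its own.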
#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
#define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE)
#define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC)
#define EXECUTABLE_POOL_WRITABLE false
#else
#define EXECUTABLE_POOL_WRITABLE true
#endif
namespace JSC {

inline size_t roundUpAllocationSize(size_t request, size_t granularity)
{
    if ((std::numeric_limits<size_t>::max() - granularity) <= request)
        CRASH(); // Allocation is too large

    // Round up to the next multiple of 'granularity', which must be a power of two.
    size_t size = request + (granularity - 1);
    size = size & ~(granularity - 1);
    ASSERT(size >= request);
    return size;
}
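
// A quick worked example of the rounding above, assuming a power-of-two
// granularity: roundUpAllocationSize(13, 8) yields (13 + 7) & ~7 == 16,
// while a request that is already eight-byte aligned comes back unchanged.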
}

#if ENABLE(JIT) && ENABLE(ASSEMBLER)

namespace JSC {

class JSGlobalData;
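
// Called by the allocator when a system allocation fails, so the VM can
// release memory (for example by discarding compiled code) before the
// allocation is retried.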
void releaseExecutableMemory(JSGlobalData&);
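
// An ExecutablePool is a bump-pointer allocator over one or more
// page-granularity executable allocations: alloc() carves word-aligned
// chunks out of the current allocation and spills into a fresh one when a
// request does not fit.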
class ExecutablePool : public RefCounted<ExecutablePool> {
public:
#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
    typedef PageAllocation Allocation;
#else
    class Allocation {
    public:
        Allocation(void* base, size_t size)
            : m_base(base)
            , m_size(size)
        {
        }
        void* base() { return m_base; }
        size_t size() { return m_size; }
        bool operator!() const { return !m_base; }

    private:
        void* m_base;
        size_t m_size;
    };
#endif
    typedef Vector<Allocation, 2> AllocationList;
    static PassRefPtr<ExecutablePool> create(JSGlobalData& globalData, size_t n)
    {
        return adoptRef(new ExecutablePool(globalData, n));
    }
    void* alloc(JSGlobalData& globalData, size_t n)
    {
        ASSERT(m_freePtr <= m_end);

        // Round 'n' up to a multiple of word size; if all allocations are of
        // word sized quantities, then all subsequent allocations will be aligned.
        n = roundUpAllocationSize(n, sizeof(void*));

        if (static_cast<ptrdiff_t>(n) < (m_end - m_freePtr)) {
            void* result = m_freePtr;
            m_freePtr += n;
            return result;
        }

        // Insufficient space to allocate in the existing pool,
        // so we need to allocate into a new pool.
        return poolAllocate(globalData, n);
    }
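
    // Returns the tail of an allocation to the pool. Only the most recent
    // allocation (the one ending exactly at m_freePtr) can be shrunk;
    // anything else is left untouched.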
    void tryShrink(void* allocation, size_t oldSize, size_t newSize)
    {
        if (static_cast<char*>(allocation) + oldSize != m_freePtr)
            return;
        m_freePtr = static_cast<char*>(allocation) + roundUpAllocationSize(newSize, sizeof(void*));
    }
    ~ExecutablePool()
    {
        AllocationList::iterator end = m_pools.end();
        for (AllocationList::iterator ptr = m_pools.begin(); ptr != end; ++ptr)
            ExecutablePool::systemRelease(*ptr);
    }
    size_t available() const { return (m_pools.size() > 1) ? 0 : m_end - m_freePtr; }
private:
    static Allocation systemAlloc(size_t n);
    static void systemRelease(Allocation& alloc);

    ExecutablePool(JSGlobalData&, size_t n);

    void* poolAllocate(JSGlobalData&, size_t n);

    char* m_freePtr;
    char* m_end;
    AllocationList m_pools;
};
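
// ExecutableAllocator hands out ExecutablePools and wraps the
// platform-specific machinery: page-protection changes for W^X,
// instruction-cache flushing, and page-size discovery.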
class ExecutableAllocator {
    enum ProtectionSetting { Writable, Executable };

public:
    static size_t pageSize;
    ExecutableAllocator(JSGlobalData& globalData)
    {
        if (!pageSize)
            intializePageSize();
        if (isValid())
            m_smallAllocationPool = ExecutablePool::create(globalData, JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
#if !ENABLE(INTERPRETER)
        else
            CRASH();
#endif
    }
    bool isValid() const;

    static bool underMemoryPressure();
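
    // Pool selection policy: small requests share m_smallAllocationPool,
    // oversized requests (larger than four pages) get a private pool, and
    // mid-sized requests open a fresh shared pool that may replace the
    // current small-allocation pool if it ends up with more free space.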
    PassRefPtr<ExecutablePool> poolForSize(JSGlobalData& globalData, size_t n)
    {
        // Try to fit in the existing small allocator.
        ASSERT(m_smallAllocationPool);
        if (n < m_smallAllocationPool->available())
            return m_smallAllocationPool;

        // If the request is large, we just provide an unshared allocator.
        if (n > JIT_ALLOCATOR_LARGE_ALLOC_SIZE)
            return ExecutablePool::create(globalData, n);

        // Create a new allocator.
        RefPtr<ExecutablePool> pool = ExecutablePool::create(globalData, JIT_ALLOCATOR_LARGE_ALLOC_SIZE);

        // If the new allocator will result in more free space than in
        // the current small allocator, then we will use it instead.
        if ((pool->available() - n) > m_smallAllocationPool->available())
            m_smallAllocationPool = pool;
        return pool.release();
    }
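
    // Under ASSEMBLER_WX_EXCLUSIVE, JIT memory is never writable and
    // executable at once: callers flip a region to read-write to patch it,
    // then back to read-execute before running it. Without that mode both
    // calls are no-ops, because pool pages stay writable and executable.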
#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
    static void makeWritable(void* start, size_t size)
    {
        reprotectRegion(start, size, Writable);
    }

    static void makeExecutable(void* start, size_t size)
    {
        reprotectRegion(start, size, Executable);
    }
#else
    static void makeWritable(void*, size_t) {}
    static void makeExecutable(void*, size_t) {}
#endif
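
    // cacheFlush() makes freshly written machine code visible to the
    // instruction cache. x86 keeps its caches coherent with ordinary
    // stores, so it needs no explicit flush; every other architecture
    // below flushes [code, code + size) through a platform-specific call.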
#if CPU(X86) || CPU(X86_64)
    static void cacheFlush(void*, size_t)
    {
    }
#elif CPU(MIPS)
    static void cacheFlush(void* code, size_t size)
    {
#if GCC_VERSION_AT_LEAST(4, 3, 0)
#if WTF_MIPS_ISA_REV(2) && !GCC_VERSION_AT_LEAST(4, 4, 3)
        int lineSize;
        asm("rdhwr %0, $1" : "=r" (lineSize));
        // Modify "start" and "end" to avoid a GCC 4.3.0-4.4.2 bug in
        // mips_expand_synci_loop that may execute synci one more time.
        // "start" points to the first byte of the cache line.
        // "end" points to the last byte of the line before the last cache
        // line. Because size is always a multiple of 4, it is safe to set
        // "end" to the last byte.
        intptr_t start = reinterpret_cast<intptr_t>(code) & (-lineSize);
        intptr_t end = ((reinterpret_cast<intptr_t>(code) + size - 1) & (-lineSize)) - 1;
        __builtin___clear_cache(reinterpret_cast<char*>(start), reinterpret_cast<char*>(end));
#else
        intptr_t end = reinterpret_cast<intptr_t>(code) + size;
        __builtin___clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(end));
#endif
#else
        _flush_cache(reinterpret_cast<char*>(code), size, BCACHE);
#endif
    }
#elif CPU(ARM_THUMB2) && OS(IOS)
    static void cacheFlush(void* code, size_t size)
    {
        sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
    }
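    // The two Linux ARM variants below trap into the kernel's
    // __ARM_NR_cacheflush syscall (number 0xf0002); they differ only in how
    // the syscall number is materialized into r7 for the Thumb-2 versus
    // traditional-ARM instruction encodings.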
#elif CPU(ARM_THUMB2) && OS(LINUX)
    static void cacheFlush(void* code, size_t size)
    {
        asm volatile (
            "push    {r7}\n"
            "mov     r0, %0\n"
            "mov     r1, %1\n"
            "movw    r7, #0x2\n"
            "movt    r7, #0xf\n"
            "mov     r2, #0x0\n"
            "svc     0x0\n"
            "pop     {r7}\n"
            :
            : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
            : "r0", "r1", "r2");
    }
#elif OS(SYMBIAN)
    static void cacheFlush(void* code, size_t size)
    {
        User::IMB_Range(code, static_cast<char*>(code) + size);
    }
#elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(RVCT)
    static __asm void cacheFlush(void* code, size_t size);
#elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(GCC)
    static void cacheFlush(void* code, size_t size)
    {
        asm volatile (
            "push    {r7}\n"
            "mov     r0, %0\n"
            "mov     r1, %1\n"
            "mov     r7, #0xf0000\n"
            "add     r7, r7, #0x2\n"
            "mov     r2, #0x0\n"
            "svc     0x0\n"
            "pop     {r7}\n"
            :
            : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
            : "r0", "r1", "r2");
    }
#elif OS(WINCE)
    static void cacheFlush(void* code, size_t size)
    {
        CacheRangeFlush(code, size, CACHE_SYNC_ALL);
    }
#elif PLATFORM(BREWMP)
    static void cacheFlush(void* code, size_t size)
    {
        RefPtr<IMemCache1> memCache = createRefPtrInstance<IMemCache1>(AEECLSID_MemCache1);
        IMemCache1_ClearCache(memCache.get(), reinterpret_cast<uint32>(code), size, MEMSPACE_CACHE_FLUSH, MEMSPACE_DATACACHE);
        IMemCache1_ClearCache(memCache.get(), reinterpret_cast<uint32>(code), size, MEMSPACE_CACHE_INVALIDATE, MEMSPACE_INSTCACHE);
    }
#elif CPU(SH4) && OS(LINUX)
    static void cacheFlush(void* code, size_t size)
    {
#ifdef CACHEFLUSH_D_L2
        syscall(__NR_cacheflush, reinterpret_cast<unsigned>(code), size, CACHEFLUSH_D_WB | CACHEFLUSH_I | CACHEFLUSH_D_L2);
#else
        syscall(__NR_cacheflush, reinterpret_cast<unsigned>(code), size, CACHEFLUSH_D_WB | CACHEFLUSH_I);
#endif
    }
#else
    #error "The cacheFlush support is missing on this platform."
#endif

    static size_t committedByteCount();
private:

#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
    static void reprotectRegion(void*, size_t, ProtectionSetting);
#endif

    RefPtr<ExecutablePool> m_smallAllocationPool;
    static void intializePageSize();
};
inline ExecutablePool::ExecutablePool(JSGlobalData& globalData, size_t n)
{
    size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
    Allocation mem = systemAlloc(allocSize);
    if (!mem) {
        releaseExecutableMemory(globalData);
        mem = systemAlloc(allocSize);
    }
    m_pools.append(mem);
    m_freePtr = static_cast<char*>(mem.base());
    if (!m_freePtr)
        CRASH(); // Failed to allocate
    m_end = m_freePtr + allocSize;
}
inline void* ExecutablePool::poolAllocate(JSGlobalData& globalData, size_t n)
{
    size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);

    Allocation result = systemAlloc(allocSize);
    if (!result.base()) {
        releaseExecutableMemory(globalData);
        result = systemAlloc(allocSize);
        if (!result.base())
            CRASH(); // Failed to allocate
    }

    ASSERT(m_end >= m_freePtr);
    if ((allocSize - n) > static_cast<size_t>(m_end - m_freePtr)) {
        // Replace allocation pool
        m_freePtr = static_cast<char*>(result.base()) + n;
        m_end = static_cast<char*>(result.base()) + allocSize;
    }

    m_pools.append(result);
    return result.base();
}
}

#else // !(ENABLE(JIT) && ENABLE(ASSEMBLER))

namespace JSC {

class ExecutableAllocator {
public:
    static size_t committedByteCount();
};

}
#endif // ENABLE(JIT) && ENABLE(ASSEMBLER)

#endif // !defined(ExecutableAllocator_h)