/*
 * Copyright (C) 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"

#include "ExecutableAllocator.h"

#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED)

#include "TCSpinLock.h"
#include <sys/mman.h>
#include <unistd.h>
#include <wtf/AVLTree.h>
#include <wtf/PageReservation.h>
#include <wtf/VMTags.h>

#define MMAP_FLAGS (MAP_PRIVATE | MAP_ANON | MAP_JIT)

using namespace WTF;

namespace JSC {

#define TwoPow(n) (1ull << n)
class AllocationTableSizeClass {
public:
    AllocationTableSizeClass(size_t size, size_t blockSize, unsigned log2BlockSize)
        : m_blockSize(blockSize)
    {
        ASSERT(blockSize == TwoPow(log2BlockSize));

        // Calculate the number of blocks needed to hold size.
        size_t blockMask = blockSize - 1;
        m_blockCount = (size + blockMask) >> log2BlockSize;

        // Align to the smallest power of two >= m_blockCount.
        m_blockAlignment = 1;
        while (m_blockAlignment < m_blockCount)
            m_blockAlignment += m_blockAlignment;
    }

    size_t blockSize() const { return m_blockSize; }
    size_t blockCount() const { return m_blockCount; }
    size_t blockAlignment() const { return m_blockAlignment; }

    size_t size()
    {
        return m_blockSize * m_blockCount;
    }

private:
    size_t m_blockSize;
    size_t m_blockCount;
    size_t m_blockAlignment;
};
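// For example, an 18KB request against 4KB blocks gives blockCount == 5,
// blockAlignment == 8 (the smallest power of two >= 5), and size() == 20KB; the
// allocation tables below then look for those five blocks at the top of an
// eight-block-aligned window.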
template<unsigned log2Entries>
class AllocationTableLeaf {
    typedef uint64_t BitField;

public:
    static const unsigned log2SubregionSize = 12; // 2^12 == pagesize
    static const unsigned log2RegionSize = log2SubregionSize + log2Entries;

    static const size_t subregionSize = TwoPow(log2SubregionSize);
    static const size_t regionSize = TwoPow(log2RegionSize);
    static const unsigned entries = TwoPow(log2Entries);
    COMPILE_ASSERT(entries <= (sizeof(BitField) * 8), AllocationTableLeaf_entries_fit_in_BitField);

    AllocationTableLeaf()
        : m_allocated(0)
    {
    }

    ~AllocationTableLeaf()
    {
        ASSERT(isEmpty());
    }

    size_t allocate(AllocationTableSizeClass& sizeClass)
    {
        ASSERT(sizeClass.blockSize() == subregionSize);
        ASSERT(!isFull());

        size_t alignment = sizeClass.blockAlignment();
        size_t count = sizeClass.blockCount();
        // Use this mask to check for spans of free blocks.
        BitField mask = ((1ull << count) - 1) << (alignment - count);

        // Step in units of alignment size.
        for (unsigned i = 0; i < entries; i += alignment) {
            if (!(m_allocated & mask)) {
                m_allocated |= mask;
                return (i + (alignment - count)) << log2SubregionSize;
            }
            mask <<= alignment;
        }
        return notFound;
    }

    void free(size_t location, AllocationTableSizeClass& sizeClass)
    {
        ASSERT(sizeClass.blockSize() == subregionSize);

        size_t entry = location >> log2SubregionSize;
        size_t count = sizeClass.blockCount();
        BitField mask = ((1ull << count) - 1) << entry;

        ASSERT((m_allocated & mask) == mask);
        m_allocated &= ~mask;
    }

    bool isEmpty()
    {
        return !m_allocated;
    }

    bool isFull()
    {
        return !~m_allocated;
    }

    static size_t size()
    {
        return regionSize;
    }

    static AllocationTableSizeClass classForSize(size_t size)
    {
        return AllocationTableSizeClass(size, subregionSize, log2SubregionSize);
    }

    void dump(size_t parentOffset = 0, unsigned indent = 0)
    {
        for (unsigned i = 0; i < indent; ++i)
            fprintf(stderr, "    ");
        fprintf(stderr, "%08x: [%016llx]\n", (int)parentOffset, m_allocated);
    }

private:
    BitField m_allocated;
};
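// Illustration of the scan in AllocationTableLeaf::allocate(): with blockCount == 3
// and blockAlignment == 4 the initial mask is 0b1110, so the first candidate span
// covers entries 1-3; each iteration shifts the mask left by the alignment (next
// span: entries 5-7), keeping every candidate at the top of a four-entry-aligned
// window, and the returned offset is the lowest entry of the span times the page size.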
template<class NextLevel>
class LazyAllocationTable {
public:
    static const unsigned log2RegionSize = NextLevel::log2RegionSize;
    static const unsigned entries = NextLevel::entries;

    LazyAllocationTable()
        : m_ptr(0)
    {
    }

    ~LazyAllocationTable()
    {
        ASSERT(isEmpty());
    }

    size_t allocate(AllocationTableSizeClass& sizeClass)
    {
        if (!m_ptr)
            m_ptr = new NextLevel();
        return m_ptr->allocate(sizeClass);
    }

    void free(size_t location, AllocationTableSizeClass& sizeClass)
    {
        ASSERT(m_ptr);
        m_ptr->free(location, sizeClass);
        if (m_ptr->isEmpty()) {
            delete m_ptr;
            m_ptr = 0;
        }
    }

    bool isEmpty()
    {
        return !m_ptr;
    }

    bool isFull()
    {
        return m_ptr && m_ptr->isFull();
    }

    static size_t size()
    {
        return NextLevel::size();
    }

    void dump(size_t parentOffset = 0, unsigned indent = 0)
    {
        ASSERT(m_ptr);
        m_ptr->dump(parentOffset, indent);
    }

    static AllocationTableSizeClass classForSize(size_t size)
    {
        return NextLevel::classForSize(size);
    }

private:
    NextLevel* m_ptr;
};
template<class NextLevel, unsigned log2Entries>
class AllocationTableDirectory {
    typedef uint64_t BitField;

public:
    static const unsigned log2SubregionSize = NextLevel::log2RegionSize;
    static const unsigned log2RegionSize = log2SubregionSize + log2Entries;

    static const size_t subregionSize = TwoPow(log2SubregionSize);
    static const size_t regionSize = TwoPow(log2RegionSize);
    static const unsigned entries = TwoPow(log2Entries);
    COMPILE_ASSERT(entries <= (sizeof(BitField) * 8), AllocationTableDirectory_entries_fit_in_BitField);

    AllocationTableDirectory()
        : m_full(0)
        , m_hasSuballocation(0)
    {
    }

    ~AllocationTableDirectory()
    {
        ASSERT(isEmpty());
    }

    size_t allocate(AllocationTableSizeClass& sizeClass)
    {
        ASSERT(sizeClass.blockSize() <= subregionSize);
        ASSERT(!isFull());

        if (sizeClass.blockSize() < subregionSize) {
            BitField bit = 1;
            for (unsigned i = 0; i < entries; ++i, bit += bit) {
                if (m_full & bit)
                    continue;
                size_t location = m_suballocations[i].allocate(sizeClass);
                if (location != notFound) {
                    // If this didn't already have a subregion, it does now!
                    m_hasSuballocation |= bit;
                    // Mirror the suballocation's full bit.
                    if (m_suballocations[i].isFull())
                        m_full |= bit;
                    return (i * subregionSize) | location;
                }
            }
            return notFound;
        }

        // A block is allocated if either it is fully allocated or contains suballocations.
        BitField allocated = m_full | m_hasSuballocation;

        size_t alignment = sizeClass.blockAlignment();
        size_t count = sizeClass.blockCount();
        // Use this mask to check for spans of free blocks.
        BitField mask = ((1ull << count) - 1) << (alignment - count);

        // Step in units of alignment size.
        for (unsigned i = 0; i < entries; i += alignment) {
            if (!(allocated & mask)) {
                m_full |= mask;
                return (i + (alignment - count)) << log2SubregionSize;
            }
            mask <<= alignment;
        }
        return notFound;
    }

    void free(size_t location, AllocationTableSizeClass& sizeClass)
    {
        ASSERT(sizeClass.blockSize() <= subregionSize);

        size_t entry = location >> log2SubregionSize;

        if (sizeClass.blockSize() < subregionSize) {
            BitField bit = 1ull << entry;
            m_suballocations[entry].free(location & (subregionSize - 1), sizeClass);
            // Check if the suballocation is now empty.
            if (m_suballocations[entry].isEmpty())
                m_hasSuballocation &= ~bit;
            // No need to check, it clearly isn't full any more!
            m_full &= ~bit;
        } else {
            size_t count = sizeClass.blockCount();
            BitField mask = ((1ull << count) - 1) << entry;
            ASSERT((m_full & mask) == mask);
            ASSERT(!(m_hasSuballocation & mask));
            m_full &= ~mask;
        }
    }

    bool isEmpty()
    {
        return !(m_full | m_hasSuballocation);
    }

    bool isFull()
    {
        return !~m_full;
    }

    static size_t size()
    {
        return regionSize;
    }

    static AllocationTableSizeClass classForSize(size_t size)
    {
        if (size < subregionSize) {
            AllocationTableSizeClass sizeClass = NextLevel::classForSize(size);
            if (sizeClass.size() < NextLevel::size())
                return sizeClass;
        }
        return AllocationTableSizeClass(size, subregionSize, log2SubregionSize);
    }

    void dump(size_t parentOffset = 0, unsigned indent = 0)
    {
        for (unsigned i = 0; i < indent; ++i)
            fprintf(stderr, "    ");
        fprintf(stderr, "%08x: [", (int)parentOffset);
        for (unsigned i = 0; i < entries; ++i) {
            BitField bit = 1ull << i;
            char c = m_hasSuballocation & bit
                ? (m_full & bit ? 'N' : 'n')
                : (m_full & bit ? 'F' : '-');
            fprintf(stderr, "%c", c);
        }
        fprintf(stderr, "]\n");

        for (unsigned i = 0; i < entries; ++i) {
            BitField bit = 1ull << i;
            size_t offset = parentOffset | (subregionSize * i);
            if (m_hasSuballocation & bit)
                m_suballocations[i].dump(offset, indent + 1);
        }
    }

private:
    NextLevel m_suballocations[entries];
    // Subregions exist in one of four states:
    // (1) empty (both bits clear)
    // (2) fully allocated as a single allocation (m_full set)
    // (3) partially allocated through suballocations (m_hasSuballocation set)
    // (4) fully allocated through suballocations (both bits set)
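    // For example, with entries == 4, m_full == 0b0001 and m_hasSuballocation == 0b0110
    // means subregion 0 is a single full allocation, subregions 1 and 2 hold partial
    // suballocations, and subregion 3 is free; dump() prints these four states as
    // 'F', 'n', 'N' and '-' respectively.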
    BitField m_full;
    BitField m_hasSuballocation;
};
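// The typedefs below compose the tables into fixed pool sizes: AllocationTableLeaf<6>
// tracks 64 four-kilobyte pages (256KB), and each directory level multiplies its
// child's region size by 2^log2Entries, giving 64 x 256KB = 16MB, 2 x 16MB = 32MB,
// and 64 x 16MB = 1GB.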
typedef AllocationTableLeaf<6> PageTables256KB;
typedef AllocationTableDirectory<PageTables256KB, 6> PageTables16MB;
typedef AllocationTableDirectory<LazyAllocationTable<PageTables16MB>, 1> PageTables32MB;
typedef AllocationTableDirectory<LazyAllocationTable<PageTables16MB>, 6> PageTables1GB;

#if CPU(ARM)
typedef PageTables16MB FixedVMPoolPageTables;
#elif CPU(X86_64)
typedef PageTables1GB FixedVMPoolPageTables;
#else
typedef PageTables32MB FixedVMPoolPageTables;
#endif
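// FixedVMPoolAllocator reserves the entire pool address range up front (with guard
// pages) and tracks usage with FixedVMPoolPageTables; physical memory is committed
// only when alloc() hands out a block and is decommitted again in free().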
class FixedVMPoolAllocator {
public:
    FixedVMPoolAllocator()
    {
        ASSERT(PageTables256KB::size() == 256 * 1024);
        ASSERT(PageTables16MB::size() == 16 * 1024 * 1024);
        ASSERT(PageTables32MB::size() == 32 * 1024 * 1024);
        ASSERT(PageTables1GB::size() == 1024 * 1024 * 1024);

        m_reservation = PageReservation::reserveWithGuardPages(FixedVMPoolPageTables::size(), OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
#if !ENABLE(INTERPRETER)
        if (!isValid())
            CRASH();
#endif
    }

    ExecutablePool::Allocation alloc(size_t requestedSize)
    {
        ASSERT(requestedSize);
        AllocationTableSizeClass sizeClass = classForSize(requestedSize);
        size_t size = sizeClass.size();
        ASSERT(size);

        if (size >= FixedVMPoolPageTables::size())
            return ExecutablePool::Allocation(0, 0);
        if (m_pages.isFull())
            return ExecutablePool::Allocation(0, 0);

        size_t offset = m_pages.allocate(sizeClass);
        if (offset == notFound)
            return ExecutablePool::Allocation(0, 0);

        void* pointer = offsetToPointer(offset);
        m_reservation.commit(pointer, size);
        return ExecutablePool::Allocation(pointer, size);
    }

    void free(ExecutablePool::Allocation allocation)
    {
        void* pointer = allocation.base();
        size_t size = allocation.size();
        ASSERT(size);

        m_reservation.decommit(pointer, size);

        AllocationTableSizeClass sizeClass = classForSize(size);
        ASSERT(sizeClass.size() == size);
        m_pages.free(pointerToOffset(pointer), sizeClass);
    }

    size_t allocated()
    {
        return m_reservation.committed();
    }

    bool isValid() const
    {
        return !!m_reservation;
    }

private:
    AllocationTableSizeClass classForSize(size_t size)
    {
        return FixedVMPoolPageTables::classForSize(size);
    }

    void* offsetToPointer(size_t offset)
    {
        return reinterpret_cast<void*>(reinterpret_cast<intptr_t>(m_reservation.base()) + offset);
    }

    size_t pointerToOffset(void* pointer)
    {
        return reinterpret_cast<intptr_t>(pointer) - reinterpret_cast<intptr_t>(m_reservation.base());
    }

    PageReservation m_reservation;
    FixedVMPoolPageTables m_pages;
};
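// Illustrative caller (hypothetical, for clarity only) of the entry points defined
// below: systemAlloc() rounds the request to a size class, takes an offset from the
// page tables, and commits that range; systemRelease() decommits it again.
//
//     ExecutablePool::Allocation allocation = ExecutablePool::systemAlloc(16 * 1024);
//     if (allocation.base()) {
//         // ... emit JIT code into allocation.base() ...
//     }
//     ExecutablePool::systemRelease(allocation);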
static SpinLock spinlock = SPINLOCK_INITIALIZER;
static FixedVMPoolAllocator* allocator = 0;

size_t ExecutableAllocator::committedByteCount()
{
    SpinLockHolder lockHolder(&spinlock);
    return allocator ? allocator->allocated() : 0;
}

void ExecutableAllocator::intializePageSize()
{
    ExecutableAllocator::pageSize = getpagesize();
}

bool ExecutableAllocator::isValid() const
{
    SpinLockHolder lock_holder(&spinlock);
    if (!allocator)
        allocator = new FixedVMPoolAllocator();
    return allocator->isValid();
}

bool ExecutableAllocator::underMemoryPressure()
{
    // Technically we should take the spin lock here, but we don't care if we get stale data.
    // This is only really a heuristic anyway.
    return allocator && (allocator->allocated() > (FixedVMPoolPageTables::size() / 2));
}

ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t size)
{
    SpinLockHolder lock_holder(&spinlock);
    ASSERT(allocator);
    return allocator->alloc(size);
}

void ExecutablePool::systemRelease(ExecutablePool::Allocation& allocation)
{
    SpinLockHolder lock_holder(&spinlock);
    ASSERT(allocator);
    allocator->free(allocation);
}

}

#endif // ENABLE(EXECUTABLE_ALLOCATOR_FIXED)

#if !ENABLE(JIT)
// FIXME: Needed to satisfy JavaScriptCore.exp requirements when building only the interpreter.
namespace JSC {
size_t ExecutableAllocator::committedByteCount()
{
    return 0;
}
}
#endif // !ENABLE(JIT)