// jit/ExecutableAllocatorFixedVMPool.cpp (JavaScriptCore-903.5)
/*
 * Copyright (C) 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#include "ExecutableAllocator.h"

#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED)

#include <errno.h>

#include "TCSpinLock.h"
#include <sys/mman.h>
#include <unistd.h>
#include <wtf/AVLTree.h>
#include <wtf/PageReservation.h>
#include <wtf/VMTags.h>

#if OS(LINUX)
#include <stdio.h>
#endif

#define MMAP_FLAGS (MAP_PRIVATE | MAP_ANON | MAP_JIT)

using namespace WTF;

namespace JSC {

#define TwoPow(n) (1ull << n)

class AllocationTableSizeClass {
public:
    AllocationTableSizeClass(size_t size, size_t blockSize, unsigned log2BlockSize)
        : m_blockSize(blockSize)
    {
        ASSERT(blockSize == TwoPow(log2BlockSize));

        // Calculate the number of blocks needed to hold size.
        size_t blockMask = blockSize - 1;
        m_blockCount = (size + blockMask) >> log2BlockSize;

        // Align to the smallest power of two >= m_blockCount.
        m_blockAlignment = 1;
        while (m_blockAlignment < m_blockCount)
            m_blockAlignment += m_blockAlignment;
    }

    size_t blockSize() const { return m_blockSize; }
    size_t blockCount() const { return m_blockCount; }
    size_t blockAlignment() const { return m_blockAlignment; }

    size_t size()
    {
        return m_blockSize * m_blockCount;
    }

private:
    size_t m_blockSize;
    size_t m_blockCount;
    size_t m_blockAlignment;
};
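
// Worked example (derived from the constructor above): AllocationTableSizeClass(20 * 1024, 4096, 12)
// describes a 20KB request in 4KB blocks. blockCount() == 5 (the request rounded up to whole blocks),
// blockAlignment() == 8 (the smallest power of two >= 5), and size() == 20KB. The allocation tables
// below scan their bitmaps in blockAlignment()-sized windows, so this request is searched for in
// 8-block (32KB) steps.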

template<unsigned log2Entries>
class AllocationTableLeaf {
    typedef uint64_t BitField;

public:
    static const unsigned log2SubregionSize = 12; // 2^12 == 4096, the assumed page size.
    static const unsigned log2RegionSize = log2SubregionSize + log2Entries;

    static const size_t subregionSize = TwoPow(log2SubregionSize);
    static const size_t regionSize = TwoPow(log2RegionSize);
    static const unsigned entries = TwoPow(log2Entries);
    COMPILE_ASSERT(entries <= (sizeof(BitField) * 8), AllocationTableLeaf_entries_fit_in_BitField);

    AllocationTableLeaf()
        : m_allocated(0)
    {
    }

    ~AllocationTableLeaf()
    {
        ASSERT(isEmpty());
    }

    size_t allocate(AllocationTableSizeClass& sizeClass)
    {
        ASSERT(sizeClass.blockSize() == subregionSize);
        ASSERT(!isFull());

        size_t alignment = sizeClass.blockAlignment();
        size_t count = sizeClass.blockCount();
        // Use this mask to check for spans of free blocks.
        BitField mask = ((1ull << count) - 1) << (alignment - count);

        // Step in units of alignment size.
        for (unsigned i = 0; i < entries; i += alignment) {
            if (!(m_allocated & mask)) {
                m_allocated |= mask;
                return (i + (alignment - count)) << log2SubregionSize;
            }
            mask <<= alignment;
        }
        return notFound;
    }
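
    // Sketch of the scan above, for a size class with count == 3 and alignment == 4:
    // the initial mask is 0b1110, i.e. the top three pages of the first four-page
    // window. Each iteration tests one window and then shifts the mask by four bits,
    // so an allocation always occupies the high end of an aligned window and the
    // returned offset is (i + 1) pages from the start of the region.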

    void free(size_t location, AllocationTableSizeClass& sizeClass)
    {
        ASSERT(sizeClass.blockSize() == subregionSize);

        size_t entry = location >> log2SubregionSize;
        size_t count = sizeClass.blockCount();
        BitField mask = ((1ull << count) - 1) << entry;

        ASSERT((m_allocated & mask) == mask);
        m_allocated &= ~mask;
    }

    bool isEmpty()
    {
        return !m_allocated;
    }

    bool isFull()
    {
        return !~m_allocated;
    }

    static size_t size()
    {
        return regionSize;
    }

    static AllocationTableSizeClass classForSize(size_t size)
    {
        return AllocationTableSizeClass(size, subregionSize, log2SubregionSize);
    }

#ifndef NDEBUG
    void dump(size_t parentOffset = 0, unsigned indent = 0)
    {
        for (unsigned i = 0; i < indent; ++i)
            fprintf(stderr, " ");
        fprintf(stderr, "%08x: [%016llx]\n", (int)parentOffset, m_allocated);
    }
#endif

private:
    BitField m_allocated;
};


template<class NextLevel>
class LazyAllocationTable {
public:
    static const unsigned log2RegionSize = NextLevel::log2RegionSize;
    static const unsigned entries = NextLevel::entries;

    LazyAllocationTable()
        : m_ptr(0)
    {
    }

    ~LazyAllocationTable()
    {
        ASSERT(isEmpty());
    }

    size_t allocate(AllocationTableSizeClass& sizeClass)
    {
        if (!m_ptr)
            m_ptr = new NextLevel();
        return m_ptr->allocate(sizeClass);
    }

    void free(size_t location, AllocationTableSizeClass& sizeClass)
    {
        ASSERT(m_ptr);
        m_ptr->free(location, sizeClass);
        if (m_ptr->isEmpty()) {
            delete m_ptr;
            m_ptr = 0;
        }
    }

    bool isEmpty()
    {
        return !m_ptr;
    }

    bool isFull()
    {
        return m_ptr && m_ptr->isFull();
    }

    static size_t size()
    {
        return NextLevel::size();
    }

#ifndef NDEBUG
    void dump(size_t parentOffset = 0, unsigned indent = 0)
    {
        ASSERT(m_ptr);
        m_ptr->dump(parentOffset, indent);
    }
#endif

    static AllocationTableSizeClass classForSize(size_t size)
    {
        return NextLevel::classForSize(size);
    }

private:
    NextLevel* m_ptr;
};
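
// LazyAllocationTable (above) defers building its NextLevel table until the first
// allocation lands in that region, and tears it down again once the region empties.
// The 32MB and 1GB pools defined below wrap their 16MB subtables this way, so an
// untouched 16MB region costs only a null pointer of bookkeeping.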

template<class NextLevel, unsigned log2Entries>
class AllocationTableDirectory {
    typedef uint64_t BitField;

public:
    static const unsigned log2SubregionSize = NextLevel::log2RegionSize;
    static const unsigned log2RegionSize = log2SubregionSize + log2Entries;

    static const size_t subregionSize = TwoPow(log2SubregionSize);
    static const size_t regionSize = TwoPow(log2RegionSize);
    static const unsigned entries = TwoPow(log2Entries);
    COMPILE_ASSERT(entries <= (sizeof(BitField) * 8), AllocationTableDirectory_entries_fit_in_BitField);

    AllocationTableDirectory()
        : m_full(0)
        , m_hasSuballocation(0)
    {
    }

    ~AllocationTableDirectory()
    {
        ASSERT(isEmpty());
    }

    size_t allocate(AllocationTableSizeClass& sizeClass)
    {
        ASSERT(sizeClass.blockSize() <= subregionSize);
        ASSERT(!isFull());

        if (sizeClass.blockSize() < subregionSize) {
            BitField bit = 1;
            for (unsigned i = 0; i < entries; ++i, bit += bit) {
                if (m_full & bit)
                    continue;
                size_t location = m_suballocations[i].allocate(sizeClass);
                if (location != notFound) {
                    // If this didn't already have a subregion, it does now!
                    m_hasSuballocation |= bit;
                    // Mirror the suballocation's full bit.
                    if (m_suballocations[i].isFull())
                        m_full |= bit;
                    return (i * subregionSize) | location;
                }
            }
            return notFound;
        }

        // A block is allocated if either it is fully allocated or contains suballocations.
        BitField allocated = m_full | m_hasSuballocation;

        size_t alignment = sizeClass.blockAlignment();
        size_t count = sizeClass.blockCount();
        // Use this mask to check for spans of free blocks.
        BitField mask = ((1ull << count) - 1) << (alignment - count);

        // Step in units of alignment size.
        for (unsigned i = 0; i < entries; i += alignment) {
            if (!(allocated & mask)) {
                m_full |= mask;
                return (i + (alignment - count)) << log2SubregionSize;
            }
            mask <<= alignment;
        }
        return notFound;
    }
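
    // In short: requests smaller than a subregion are delegated to the first child
    // table that can take them, while requests of at least one whole subregion are
    // satisfied directly at this level by claiming subregions in the m_full bitmap,
    // using the same aligned-window scan as AllocationTableLeaf.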

    void free(size_t location, AllocationTableSizeClass& sizeClass)
    {
        ASSERT(sizeClass.blockSize() <= subregionSize);

        size_t entry = location >> log2SubregionSize;

        if (sizeClass.blockSize() < subregionSize) {
            BitField bit = 1ull << entry;
            m_suballocations[entry].free(location & (subregionSize - 1), sizeClass);
            // Check if the suballocation is now empty.
            if (m_suballocations[entry].isEmpty())
                m_hasSuballocation &= ~bit;
            // No need to check, it clearly isn't full any more!
            m_full &= ~bit;
        } else {
            size_t count = sizeClass.blockCount();
            BitField mask = ((1ull << count) - 1) << entry;
            ASSERT((m_full & mask) == mask);
            ASSERT(!(m_hasSuballocation & mask));
            m_full &= ~mask;
        }
    }

    bool isEmpty()
    {
        return !(m_full | m_hasSuballocation);
    }

    bool isFull()
    {
        return !~m_full;
    }

    static size_t size()
    {
        return regionSize;
    }

    static AllocationTableSizeClass classForSize(size_t size)
    {
        if (size < subregionSize) {
            AllocationTableSizeClass sizeClass = NextLevel::classForSize(size);
            if (sizeClass.size() < NextLevel::size())
                return sizeClass;
        }
        return AllocationTableSizeClass(size, subregionSize, log2SubregionSize);
    }

#ifndef NDEBUG
    void dump(size_t parentOffset = 0, unsigned indent = 0)
    {
        for (unsigned i = 0; i < indent; ++i)
            fprintf(stderr, " ");
        fprintf(stderr, "%08x: [", (int)parentOffset);
        for (unsigned i = 0; i < entries; ++i) {
            BitField bit = 1ull << i;
            char c = m_hasSuballocation & bit
                ? (m_full & bit ? 'N' : 'n')
                : (m_full & bit ? 'F' : '-');
            fprintf(stderr, "%c", c);
        }
        fprintf(stderr, "]\n");

        for (unsigned i = 0; i < entries; ++i) {
            BitField bit = 1ull << i;
            size_t offset = parentOffset | (subregionSize * i);
            if (m_hasSuballocation & bit)
                m_suballocations[i].dump(offset, indent + 1);
        }
    }
#endif

private:
    NextLevel m_suballocations[entries];
    // Subregions exist in one of four states:
    // (1) empty (both bits clear)
    // (2) fully allocated as a single allocation (m_full set)
    // (3) partially allocated through suballocations (m_hasSuballocation set)
    // (4) fully allocated through suballocations (both bits set)
    BitField m_full;
    BitField m_hasSuballocation;
};


typedef AllocationTableLeaf<6> PageTables256KB;
typedef AllocationTableDirectory<PageTables256KB, 6> PageTables16MB;
typedef AllocationTableDirectory<LazyAllocationTable<PageTables16MB>, 1> PageTables32MB;
typedef AllocationTableDirectory<LazyAllocationTable<PageTables16MB>, 6> PageTables1GB;
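
// How the sizes compose: a leaf with 2^6 entries tracks 64 x 4KB pages = 256KB; a
// directory of 64 such leaves tracks 16MB; a directory of two lazy 16MB tables tracks
// 32MB; and a directory of 64 lazy 16MB tables tracks 1GB. These figures are checked
// by the ASSERTs in the FixedVMPoolAllocator constructor below.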

#if CPU(ARM)
typedef PageTables16MB FixedVMPoolPageTables;
#elif CPU(X86_64)
typedef PageTables1GB FixedVMPoolPageTables;
#else
typedef PageTables32MB FixedVMPoolPageTables;
#endif


class FixedVMPoolAllocator
{
public:
    FixedVMPoolAllocator()
    {
        ASSERT(PageTables256KB::size() == 256 * 1024);
        ASSERT(PageTables16MB::size() == 16 * 1024 * 1024);
        ASSERT(PageTables32MB::size() == 32 * 1024 * 1024);
        ASSERT(PageTables1GB::size() == 1024 * 1024 * 1024);

        m_reservation = PageReservation::reserveWithGuardPages(FixedVMPoolPageTables::size(), OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
#if !ENABLE(INTERPRETER)
        if (!isValid())
            CRASH();
#endif
    }
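
    // The constructor reserves the pool's entire address range (16MB, 32MB or 1GB,
    // depending on the target CPU) up front; alloc() and free() below then commit
    // and decommit pages inside that fixed reservation, which is what keeps all JIT
    // code within one contiguous region of address space.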

    ExecutablePool::Allocation alloc(size_t requestedSize)
    {
        ASSERT(requestedSize);
        AllocationTableSizeClass sizeClass = classForSize(requestedSize);
        size_t size = sizeClass.size();
        ASSERT(size);

        if (size >= FixedVMPoolPageTables::size())
            return ExecutablePool::Allocation(0, 0);
        if (m_pages.isFull())
            return ExecutablePool::Allocation(0, 0);

        size_t offset = m_pages.allocate(sizeClass);
        if (offset == notFound)
            return ExecutablePool::Allocation(0, 0);

        void* pointer = offsetToPointer(offset);
        m_reservation.commit(pointer, size);
        return ExecutablePool::Allocation(pointer, size);
    }

    void free(ExecutablePool::Allocation allocation)
    {
        void* pointer = allocation.base();
        size_t size = allocation.size();
        ASSERT(size);

        m_reservation.decommit(pointer, size);

        AllocationTableSizeClass sizeClass = classForSize(size);
        ASSERT(sizeClass.size() == size);
        m_pages.free(pointerToOffset(pointer), sizeClass);
    }

    size_t allocated()
    {
        return m_reservation.committed();
    }

    bool isValid() const
    {
        return !!m_reservation;
    }

private:
    AllocationTableSizeClass classForSize(size_t size)
    {
        return FixedVMPoolPageTables::classForSize(size);
    }

    void* offsetToPointer(size_t offset)
    {
        return reinterpret_cast<void*>(reinterpret_cast<intptr_t>(m_reservation.base()) + offset);
    }

    size_t pointerToOffset(void* pointer)
    {
        return reinterpret_cast<intptr_t>(pointer) - reinterpret_cast<intptr_t>(m_reservation.base());
    }

    PageReservation m_reservation;
    FixedVMPoolPageTables m_pages;
};


static SpinLock spinlock = SPINLOCK_INITIALIZER;
static FixedVMPoolAllocator* allocator = 0;


size_t ExecutableAllocator::committedByteCount()
{
    SpinLockHolder lockHolder(&spinlock);
    return allocator ? allocator->allocated() : 0;
}

void ExecutableAllocator::intializePageSize()
{
    ExecutableAllocator::pageSize = getpagesize();
}

bool ExecutableAllocator::isValid() const
{
    SpinLockHolder lock_holder(&spinlock);
    if (!allocator)
        allocator = new FixedVMPoolAllocator();
    return allocator->isValid();
}

bool ExecutableAllocator::underMemoryPressure()
{
    // Technically we should take the spin lock here, but we don't care if we get stale data.
    // This is only really a heuristic anyway.
    return allocator && (allocator->allocated() > (FixedVMPoolPageTables::size() / 2));
}

ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t size)
{
    SpinLockHolder lock_holder(&spinlock);
    ASSERT(allocator);
    return allocator->alloc(size);
}

void ExecutablePool::systemRelease(ExecutablePool::Allocation& allocation)
{
    SpinLockHolder lock_holder(&spinlock);
    ASSERT(allocator);
    allocator->free(allocation);
}
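
// Overall flow, as read from this file: ExecutableAllocator::isValid() lazily creates
// the shared FixedVMPoolAllocator under the spin lock; ExecutablePool::systemAlloc()
// hands out committed chunks carved from the fixed reservation; and
// ExecutablePool::systemRelease() decommits those pages and returns their offsets to
// the page tables.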

}

#endif // ENABLE(EXECUTABLE_ALLOCATOR_FIXED)

#if !ENABLE(JIT)
// FIXME: Needed to satisfy JavaScriptCore.exp requirements when building only the interpreter.
namespace JSC {
size_t ExecutableAllocator::committedByteCount()
{
    return 0;
}
} // namespace JSC
#endif // !ENABLE(JIT)