/*
 * Copyright (C) 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#include "ExecutableAllocator.h"

#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED)

#include <errno.h>

#include "TCSpinLock.h"
#include <sys/mman.h>
#include <unistd.h>
#include <wtf/AVLTree.h>
#include <wtf/VMTags.h>

#define MMAP_FLAGS (MAP_PRIVATE | MAP_ANON | MAP_JIT)

using namespace WTF;

namespace JSC {

#define TwoPow(n) (1ull << n)

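// An AllocationTableSizeClass describes a request to the page tables below:
// the requested size is rounded up to a whole number of fixed-size blocks
// (m_blockCount), and m_blockAlignment records the smallest power of two
// >= that count. The tables scan for free runs in strides of m_blockAlignment,
// so, for example, a 12KB request against 4KB blocks yields blockCount() == 3,
// blockAlignment() == 4 and size() == 12KB.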
class AllocationTableSizeClass {
public:
    AllocationTableSizeClass(size_t size, size_t blockSize, unsigned log2BlockSize)
        : m_blockSize(blockSize)
    {
        ASSERT(blockSize == TwoPow(log2BlockSize));

        // Calculate the number of blocks needed to hold size.
        size_t blockMask = blockSize - 1;
        m_blockCount = (size + blockMask) >> log2BlockSize;

        // Align to the smallest power of two >= m_blockCount.
        m_blockAlignment = 1;
        while (m_blockAlignment < m_blockCount)
            m_blockAlignment += m_blockAlignment;
    }

    size_t blockSize() const { return m_blockSize; }
    size_t blockCount() const { return m_blockCount; }
    size_t blockAlignment() const { return m_blockAlignment; }

    size_t size()
    {
        return m_blockSize * m_blockCount;
    }

private:
    size_t m_blockSize;
    size_t m_blockCount;
    size_t m_blockAlignment;
};

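// An AllocationTableLeaf tracks a region of 2^log2Entries 4KB subregions
// (the tables assume 4KB pages; see log2SubregionSize) with one bit per
// subregion in a 64-bit bitmap. allocate() slides a mask of blockCount()
// bits through the bitmap in steps of blockAlignment(); for example, with
// blockCount() == 3 and blockAlignment() == 4 the mask is 0b1110, so the
// run lands at the high end of a naturally aligned group of four
// subregions. Returned locations are byte offsets within the region.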
template<unsigned log2Entries>
class AllocationTableLeaf {
    typedef uint64_t BitField;

public:
    static const unsigned log2SubregionSize = 12; // 2^12 == pagesize
    static const unsigned log2RegionSize = log2SubregionSize + log2Entries;

    static const size_t subregionSize = TwoPow(log2SubregionSize);
    static const size_t regionSize = TwoPow(log2RegionSize);
    static const unsigned entries = TwoPow(log2Entries);
    COMPILE_ASSERT(entries <= (sizeof(BitField) * 8), AllocationTableLeaf_entries_fit_in_BitField);

    AllocationTableLeaf()
        : m_allocated(0)
    {
    }

    ~AllocationTableLeaf()
    {
        ASSERT(isEmpty());
    }

    size_t allocate(AllocationTableSizeClass& sizeClass)
    {
        ASSERT(sizeClass.blockSize() == subregionSize);
        ASSERT(!isFull());

        size_t alignment = sizeClass.blockAlignment();
        size_t count = sizeClass.blockCount();
        // Use this mask to check for spans of free blocks.
        BitField mask = ((1ull << count) - 1) << (alignment - count);

        // Step in units of alignment size.
        for (unsigned i = 0; i < entries; i += alignment) {
            if (!(m_allocated & mask)) {
                m_allocated |= mask;
                return (i + (alignment - count)) << log2SubregionSize;
            }
            mask <<= alignment;
        }
        return notFound;
    }

    void free(size_t location, AllocationTableSizeClass& sizeClass)
    {
        ASSERT(sizeClass.blockSize() == subregionSize);

        size_t entry = location >> log2SubregionSize;
        size_t count = sizeClass.blockCount();
        BitField mask = ((1ull << count) - 1) << entry;

        ASSERT((m_allocated & mask) == mask);
        m_allocated &= ~mask;
    }

    bool isEmpty()
    {
        return !m_allocated;
    }

    bool isFull()
    {
        return !~m_allocated;
    }

    static size_t size()
    {
        return regionSize;
    }

    static AllocationTableSizeClass classForSize(size_t size)
    {
        return AllocationTableSizeClass(size, subregionSize, log2SubregionSize);
    }

#ifndef NDEBUG
    void dump(size_t parentOffset = 0, unsigned indent = 0)
    {
        for (unsigned i = 0; i < indent; ++i)
            fprintf(stderr, " ");
        fprintf(stderr, "%08x: [%016llx]\n", (int)parentOffset, m_allocated);
    }
#endif

private:
    BitField m_allocated;
};

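// A LazyAllocationTable wraps a NextLevel table behind a pointer: the
// underlying table is only created on first allocation and is deleted again
// as soon as it becomes empty, so untouched parts of a large pool cost no
// bookkeeping memory.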
template<class NextLevel>
class LazyAllocationTable {
public:
    static const unsigned log2RegionSize = NextLevel::log2RegionSize;
    static const unsigned entries = NextLevel::entries;

    LazyAllocationTable()
        : m_ptr(0)
    {
    }

    ~LazyAllocationTable()
    {
        ASSERT(isEmpty());
    }

    size_t allocate(AllocationTableSizeClass& sizeClass)
    {
        if (!m_ptr)
            m_ptr = new NextLevel();
        return m_ptr->allocate(sizeClass);
    }

    void free(size_t location, AllocationTableSizeClass& sizeClass)
    {
        ASSERT(m_ptr);
        m_ptr->free(location, sizeClass);
        if (m_ptr->isEmpty()) {
            delete m_ptr;
            m_ptr = 0;
        }
    }

    bool isEmpty()
    {
        return !m_ptr;
    }

    bool isFull()
    {
        return m_ptr && m_ptr->isFull();
    }

    static size_t size()
    {
        return NextLevel::size();
    }

#ifndef NDEBUG
    void dump(size_t parentOffset = 0, unsigned indent = 0)
    {
        ASSERT(m_ptr);
        m_ptr->dump(parentOffset, indent);
    }
#endif

    static AllocationTableSizeClass classForSize(size_t size)
    {
        return NextLevel::classForSize(size);
    }

private:
    NextLevel* m_ptr;
};

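// An AllocationTableDirectory fans out over 2^log2Entries NextLevel
// subregions. Requests of one or more whole subregions are satisfied
// directly from m_full using the same mask-scan as the leaf; smaller
// requests are forwarded to a per-entry suballocation, with m_full and
// m_hasSuballocation mirroring each entry's state (see the comment on the
// data members at the bottom of the class).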
template<class NextLevel, unsigned log2Entries>
class AllocationTableDirectory {
    typedef uint64_t BitField;

public:
    static const unsigned log2SubregionSize = NextLevel::log2RegionSize;
    static const unsigned log2RegionSize = log2SubregionSize + log2Entries;

    static const size_t subregionSize = TwoPow(log2SubregionSize);
    static const size_t regionSize = TwoPow(log2RegionSize);
    static const unsigned entries = TwoPow(log2Entries);
    COMPILE_ASSERT(entries <= (sizeof(BitField) * 8), AllocationTableDirectory_entries_fit_in_BitField);

    AllocationTableDirectory()
        : m_full(0)
        , m_hasSuballocation(0)
    {
    }

    ~AllocationTableDirectory()
    {
        ASSERT(isEmpty());
    }

    size_t allocate(AllocationTableSizeClass& sizeClass)
    {
        ASSERT(sizeClass.blockSize() <= subregionSize);
        ASSERT(!isFull());

        if (sizeClass.blockSize() < subregionSize) {
            BitField bit = 1;
            for (unsigned i = 0; i < entries; ++i, bit += bit) {
                if (m_full & bit)
                    continue;
                size_t location = m_suballocations[i].allocate(sizeClass);
                if (location != notFound) {
                    // If this didn't already have a subregion, it does now!
                    m_hasSuballocation |= bit;
                    // Mirror the suballocation's full bit.
                    if (m_suballocations[i].isFull())
                        m_full |= bit;
                    return (i * subregionSize) + location;
                }
            }
            return notFound;
        }

        // A block is allocated if either it is fully allocated or contains suballocations.
        BitField allocated = m_full | m_hasSuballocation;

        size_t alignment = sizeClass.blockAlignment();
        size_t count = sizeClass.blockCount();
        // Use this mask to check for spans of free blocks.
        BitField mask = ((1ull << count) - 1) << (alignment - count);

        // Step in units of alignment size.
        for (unsigned i = 0; i < entries; i += alignment) {
            if (!(allocated & mask)) {
                m_full |= mask;
                return (i + (alignment - count)) << log2SubregionSize;
            }
            mask <<= alignment;
        }
        return notFound;
    }

    void free(size_t location, AllocationTableSizeClass& sizeClass)
    {
        ASSERT(sizeClass.blockSize() <= subregionSize);

        size_t entry = location >> log2SubregionSize;

        if (sizeClass.blockSize() < subregionSize) {
            BitField bit = 1ull << entry;
            m_suballocations[entry].free(location & (subregionSize - 1), sizeClass);
            // Check if the suballocation is now empty.
            if (m_suballocations[entry].isEmpty())
                m_hasSuballocation &= ~bit;
            // No need to check, it clearly isn't full any more!
            m_full &= ~bit;
        } else {
            size_t count = sizeClass.blockCount();
            BitField mask = ((1ull << count) - 1) << entry;
            ASSERT((m_full & mask) == mask);
            ASSERT(!(m_hasSuballocation & mask));
            m_full &= ~mask;
        }
    }

    bool isEmpty()
    {
        return !(m_full | m_hasSuballocation);
    }

    bool isFull()
    {
        return !~m_full;
    }

    static size_t size()
    {
        return regionSize;
    }

    static AllocationTableSizeClass classForSize(size_t size)
    {
        if (size < subregionSize) {
            AllocationTableSizeClass sizeClass = NextLevel::classForSize(size);
            if (sizeClass.size() < NextLevel::size())
                return sizeClass;
        }
        return AllocationTableSizeClass(size, subregionSize, log2SubregionSize);
    }

#ifndef NDEBUG
    void dump(size_t parentOffset = 0, unsigned indent = 0)
    {
        for (unsigned i = 0; i < indent; ++i)
            fprintf(stderr, " ");
        fprintf(stderr, "%08x: [", (int)parentOffset);
        for (unsigned i = 0; i < entries; ++i) {
            BitField bit = 1ull << i;
            char c = m_hasSuballocation & bit
                ? (m_full & bit ? 'N' : 'n')
                : (m_full & bit ? 'F' : '-');
            fprintf(stderr, "%c", c);
        }
        fprintf(stderr, "]\n");

        for (unsigned i = 0; i < entries; ++i) {
            BitField bit = 1ull << i;
            size_t offset = parentOffset | (subregionSize * i);
            if (m_hasSuballocation & bit)
                m_suballocations[i].dump(offset, indent + 1);
        }
    }
#endif

private:
    NextLevel m_suballocations[entries];
    // Subregions exist in one of four states:
    // (1) empty (both bits clear)
    // (2) fully allocated as a single allocation (m_full set)
    // (3) partially allocated through suballocations (m_hasSuballocation set)
    // (4) fully allocated through suballocations (both bits set)
    BitField m_full;
    BitField m_hasSuballocation;
};

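// Pool layouts built from the tables above (region sizes follow from
// log2RegionSize): a leaf with 2^6 entries of 4KB pages covers
// 2^(12 + 6) = 256KB; a directory of 64 such leaves covers 2^24 = 16MB;
// directories of lazily created 16MB tables cover 2^25 = 32MB and
// 2^30 = 1GB respectively.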
typedef AllocationTableLeaf<6> PageTables256KB;
typedef AllocationTableDirectory<PageTables256KB, 6> PageTables16MB;
typedef AllocationTableDirectory<LazyAllocationTable<PageTables16MB>, 1> PageTables32MB;
typedef AllocationTableDirectory<LazyAllocationTable<PageTables16MB>, 6> PageTables1GB;

#if CPU(ARM)
typedef PageTables16MB FixedVMPoolPageTables;
#elif CPU(X86_64) && !OS(LINUX)
typedef PageTables1GB FixedVMPoolPageTables;
#else
typedef PageTables32MB FixedVMPoolPageTables;
#endif

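// The FixedVMPoolAllocator reserves the entire pool with a single mmap in
// its constructor, so all executable memory handed out here lives in one
// fixed virtual address range. Individual allocations are carved out of
// that range by FixedVMPoolPageTables; freed memory is handed back to the
// kernel with madvise (see release()/reuse() below) while the reservation
// itself is never unmapped.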
class FixedVMPoolAllocator
{
public:
    FixedVMPoolAllocator()
    {
        m_base = mmap(0, FixedVMPoolPageTables::size(), INITIAL_PROTECTION_FLAGS, MMAP_FLAGS, VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY, 0);

        if (m_base == MAP_FAILED) {
#if ENABLE(INTERPRETER)
            m_base = 0;
#else
            CRASH();
#endif
        } else {
            // For simplicity, we keep all free memory in the pool in a 'released'
            // state. This means that we can simply reuse memory when allocating,
            // without worrying about its previous state, and that freshly freed
            // and never-used memory need not be distinguished when handing it out.
            release(m_base, FixedVMPoolPageTables::size());
        }
    }

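    // alloc() maps the request onto a size class, asks the page tables for a
    // free offset, and marks the returned range as in use with reuse(). Any
    // request the pool cannot satisfy (a size class as large as the whole
    // pool, a full table, or no suitable free run) deliberately crashes
    // rather than falling back to another allocator.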
    void* alloc(size_t size)
    {
        ASSERT(size);
        AllocationTableSizeClass sizeClass = classForSize(size);
        ASSERT(sizeClass.size());
        if (sizeClass.size() >= FixedVMPoolPageTables::size())
            CRASH();

        if (m_pages.isFull())
            CRASH();
        size_t offset = m_pages.allocate(sizeClass);
        if (offset == notFound)
            CRASH();

        void* result = offsetToPointer(offset);
        reuse(result, size);
        return result;
    }

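    // free() releases the pages back to the kernel before clearing the page
    // table bits, preserving the constructor's invariant that all free memory
    // in the pool is in the 'released' state.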
    void free(void* pointer, size_t size)
    {
        release(pointer, size);

        ASSERT(size);
        AllocationTableSizeClass sizeClass = classForSize(size);
        ASSERT(sizeClass.size());
        ASSERT(sizeClass.size() < FixedVMPoolPageTables::size());

        m_pages.free(pointerToOffset(pointer), sizeClass);
    }

    bool isValid() const
    {
        return !!m_base;
    }

private:
    // Use madvise as appropriate to prevent freed pages from being spilled,
    // and to attempt to ensure that used memory is reported correctly.
#if HAVE(MADV_FREE_REUSE)
    void release(void* position, size_t size)
    {
        while (madvise(position, size, MADV_FREE_REUSABLE) == -1 && errno == EAGAIN) { }
    }

    void reuse(void* position, size_t size)
    {
        while (madvise(position, size, MADV_FREE_REUSE) == -1 && errno == EAGAIN) { }
    }
#elif HAVE(MADV_FREE)
    void release(void* position, size_t size)
    {
        while (madvise(position, size, MADV_FREE) == -1 && errno == EAGAIN) { }
    }

    void reuse(void*, size_t) {}
#elif HAVE(MADV_DONTNEED)
    void release(void* position, size_t size)
    {
        while (madvise(position, size, MADV_DONTNEED) == -1 && errno == EAGAIN) { }
    }

    void reuse(void*, size_t) {}
#else
    void release(void*, size_t) {}
    void reuse(void*, size_t) {}
#endif

    AllocationTableSizeClass classForSize(size_t size)
    {
        return FixedVMPoolPageTables::classForSize(size);
    }

    void* offsetToPointer(size_t offset)
    {
        return reinterpret_cast<void*>(reinterpret_cast<intptr_t>(m_base) + offset);
    }

    size_t pointerToOffset(void* pointer)
    {
        return reinterpret_cast<intptr_t>(pointer) - reinterpret_cast<intptr_t>(m_base);
    }

    void* m_base;
    FixedVMPoolPageTables m_pages;
};

void ExecutableAllocator::intializePageSize()
{
    ExecutableAllocator::pageSize = getpagesize();
}

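// A single FixedVMPoolAllocator is created lazily, guarded by the spin lock
// shared by isValid(), systemAlloc() and systemRelease(). allocatedCount
// tracks the total bytes currently handed out, which underMemoryPressure()
// reads (without the lock) as a heuristic.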
static FixedVMPoolAllocator* allocator = 0;
static size_t allocatedCount = 0;
static SpinLock spinlock = SPINLOCK_INITIALIZER;

bool ExecutableAllocator::isValid() const
{
    SpinLockHolder lock_holder(&spinlock);
    if (!allocator)
        allocator = new FixedVMPoolAllocator();
    return allocator->isValid();
}

ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t size)
{
    SpinLockHolder lock_holder(&spinlock);

    if (!allocator)
        allocator = new FixedVMPoolAllocator();
    ExecutablePool::Allocation alloc = {reinterpret_cast<char*>(allocator->alloc(size)), size};
    allocatedCount += size;
    return alloc;
}

void ExecutablePool::systemRelease(const ExecutablePool::Allocation& allocation)
{
    SpinLockHolder lock_holder(&spinlock);

    ASSERT(allocator);
    allocator->free(allocation.pages, allocation.size);
    allocatedCount -= allocation.size;
}

bool ExecutablePool::underMemoryPressure()
{
    // Technically we should take the spin lock here, but we don't
    // care if we get stale data. This is only really a heuristic
    // anyway.
    return allocatedCount > (FixedVMPoolPageTables::size() / 2);
}

} // namespace JSC

#endif // ENABLE(EXECUTABLE_ALLOCATOR_FIXED)