/*
 * Copyright (C) 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#include "ExecutableAllocator.h"
#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED)

#include <errno.h>

#include "TCSpinLock.h"
#include <sys/mman.h>
#include <unistd.h>
#include <wtf/AVLTree.h>
#include <wtf/VMTags.h>

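// All pool memory lives in one anonymous, private reservation. MAP_JIT is
// Darwin's flag marking the region as intended for just-in-time compiled
// code; on platforms without it, this define would need a platform guard.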
#define MMAP_FLAGS (MAP_PRIVATE | MAP_ANON | MAP_JIT)

using namespace WTF;

namespace JSC {

#define TwoPow(n) (1ull << (n))

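// A size class describes a request in terms of the block size of the table
// level that will service it: how many blocks are needed, and the
// power-of-two alignment the bitmap scans below step by. For example, a 20KB
// request against 4KB blocks (log2BlockSize == 12) yields blockCount == 5
// and blockAlignment == 8, so the search proceeds on 8-block boundaries.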
class AllocationTableSizeClass {
public:
    AllocationTableSizeClass(size_t size, size_t blockSize, unsigned log2BlockSize)
        : m_blockSize(blockSize)
    {
        ASSERT(blockSize == TwoPow(log2BlockSize));

        // Calculate the number of blocks needed to hold size.
        size_t blockMask = blockSize - 1;
        m_blockCount = (size + blockMask) >> log2BlockSize;

        // Align to the smallest power of two >= m_blockCount.
        m_blockAlignment = 1;
        while (m_blockAlignment < m_blockCount)
            m_blockAlignment += m_blockAlignment;
    }

    size_t blockSize() const { return m_blockSize; }
    size_t blockCount() const { return m_blockCount; }
    size_t blockAlignment() const { return m_blockAlignment; }

    size_t size() const
    {
        return m_blockSize * m_blockCount;
    }

private:
    size_t m_blockSize;
    size_t m_blockCount;
    size_t m_blockAlignment;
};

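// The bottom level of the page table hierarchy: a single 64-bit bitmap in
// which each bit records whether one 4KB page (a "subregion") is allocated.
// With log2Entries == 6 this covers 64 pages, i.e. a 256KB region.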
template<unsigned log2Entries>
class AllocationTableLeaf {
    typedef uint64_t BitField;

public:
    static const unsigned log2SubregionSize = 12; // 2^12 == pagesize
    static const unsigned log2RegionSize = log2SubregionSize + log2Entries;

    static const size_t subregionSize = TwoPow(log2SubregionSize);
    static const size_t regionSize = TwoPow(log2RegionSize);
    static const unsigned entries = TwoPow(log2Entries);
    COMPILE_ASSERT(entries <= (sizeof(BitField) * 8), AllocationTableLeaf_entries_fit_in_BitField);

    AllocationTableLeaf()
        : m_allocated(0)
    {
    }

    ~AllocationTableLeaf()
    {
        ASSERT(isEmpty());
    }

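    // Scans the bitmap for a free span of sizeClass.blockCount() pages,
    // stepping in units of the size class's power-of-two alignment. The mask
    // places the span at the top of each alignment-sized window; e.g. with
    // count == 5 and alignment == 8, it covers bits 3..7 of the first window,
    // then shifts up by 8 each step. Returns a byte offset, or notFound.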
    size_t allocate(AllocationTableSizeClass& sizeClass)
    {
        ASSERT(sizeClass.blockSize() == subregionSize);
        ASSERT(!isFull());

        size_t alignment = sizeClass.blockAlignment();
        size_t count = sizeClass.blockCount();
        // Use this mask to check for spans of free blocks.
        BitField mask = ((1ull << count) - 1) << (alignment - count);

        // Step in units of alignment size.
        for (unsigned i = 0; i < entries; i += alignment) {
            if (!(m_allocated & mask)) {
                m_allocated |= mask;
                return (i + (alignment - count)) << log2SubregionSize;
            }
            mask <<= alignment;
        }
        return notFound;
    }

    void free(size_t location, AllocationTableSizeClass& sizeClass)
    {
        ASSERT(sizeClass.blockSize() == subregionSize);

        size_t entry = location >> log2SubregionSize;
        size_t count = sizeClass.blockCount();
        BitField mask = ((1ull << count) - 1) << entry;

        ASSERT((m_allocated & mask) == mask);
        m_allocated &= ~mask;
    }

    bool isEmpty()
    {
        return !m_allocated;
    }

    bool isFull()
    {
        return !~m_allocated;
    }

    static size_t size()
    {
        return regionSize;
    }

    static AllocationTableSizeClass classForSize(size_t size)
    {
        return AllocationTableSizeClass(size, subregionSize, log2SubregionSize);
    }

#ifndef NDEBUG
    void dump(size_t parentOffset = 0, unsigned indent = 0)
    {
        for (unsigned i = 0; i < indent; ++i)
            fprintf(stderr, "    ");
        fprintf(stderr, "%08x: [%016llx]\n", (int)parentOffset, m_allocated);
    }
#endif

private:
    BitField m_allocated;
};
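// Wraps a next-level table and materializes it only on first allocation,
// deleting it again once it drains. This keeps metadata for large, mostly
// unused regions (e.g. the lazy children of PageTables32MB below) off the
// heap until they are actually needed.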
template<class NextLevel>
class LazyAllocationTable {
public:
    static const unsigned log2RegionSize = NextLevel::log2RegionSize;
    static const unsigned entries = NextLevel::entries;

    LazyAllocationTable()
        : m_ptr(0)
    {
    }

    ~LazyAllocationTable()
    {
        ASSERT(isEmpty());
    }

    size_t allocate(AllocationTableSizeClass& sizeClass)
    {
        if (!m_ptr)
            m_ptr = new NextLevel();
        return m_ptr->allocate(sizeClass);
    }

    void free(size_t location, AllocationTableSizeClass& sizeClass)
    {
        ASSERT(m_ptr);
        m_ptr->free(location, sizeClass);
        if (m_ptr->isEmpty()) {
            delete m_ptr;
            m_ptr = 0;
        }
    }

    bool isEmpty()
    {
        return !m_ptr;
    }

    bool isFull()
    {
        return m_ptr && m_ptr->isFull();
    }

    static size_t size()
    {
        return NextLevel::size();
    }

#ifndef NDEBUG
    void dump(size_t parentOffset = 0, unsigned indent = 0)
    {
        ASSERT(m_ptr);
        m_ptr->dump(parentOffset, indent);
    }
#endif

    static AllocationTableSizeClass classForSize(size_t size)
    {
        return NextLevel::classForSize(size);
    }

private:
    NextLevel* m_ptr;
};

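// An interior node of the hierarchy: up to 64 child tables, summarized by
// two bitfields. m_full mirrors children that can accept no further
// allocations (or entries taken as one whole-subregion allocation), while
// m_hasSuballocation marks children holding smaller allocations. Requests of
// at least one subregion are satisfied directly at this level; smaller ones
// are forwarded to a child.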
template<class NextLevel, unsigned log2Entries>
class AllocationTableDirectory {
    typedef uint64_t BitField;

public:
    static const unsigned log2SubregionSize = NextLevel::log2RegionSize;
    static const unsigned log2RegionSize = log2SubregionSize + log2Entries;

    static const size_t subregionSize = TwoPow(log2SubregionSize);
    static const size_t regionSize = TwoPow(log2RegionSize);
    static const unsigned entries = TwoPow(log2Entries);
    COMPILE_ASSERT(entries <= (sizeof(BitField) * 8), AllocationTableDirectory_entries_fit_in_BitField);

    AllocationTableDirectory()
        : m_full(0)
        , m_hasSuballocation(0)
    {
    }

    ~AllocationTableDirectory()
    {
        ASSERT(isEmpty());
    }

    size_t allocate(AllocationTableSizeClass& sizeClass)
    {
        ASSERT(sizeClass.blockSize() <= subregionSize);
        ASSERT(!isFull());

        if (sizeClass.blockSize() < subregionSize) {
            BitField bit = 1;
            for (unsigned i = 0; i < entries; ++i, bit += bit) {
                if (m_full & bit)
                    continue;
                size_t location = m_suballocations[i].allocate(sizeClass);
                if (location != notFound) {
                    // If this didn't already have a subregion, it does now!
                    m_hasSuballocation |= bit;
                    // Mirror the suballocation's full bit.
                    if (m_suballocations[i].isFull())
                        m_full |= bit;
                    return (i * subregionSize) + location;
                }
            }
            return notFound;
        }

        // A block is allocated if either it is fully allocated or contains suballocations.
        BitField allocated = m_full | m_hasSuballocation;

        size_t alignment = sizeClass.blockAlignment();
        size_t count = sizeClass.blockCount();
        // Use this mask to check for spans of free blocks.
        BitField mask = ((1ull << count) - 1) << (alignment - count);

        // Step in units of alignment size.
        for (unsigned i = 0; i < entries; i += alignment) {
            if (!(allocated & mask)) {
                m_full |= mask;
                return (i + (alignment - count)) << log2SubregionSize;
            }
            mask <<= alignment;
        }
        return notFound;
    }

    void free(size_t location, AllocationTableSizeClass& sizeClass)
    {
        ASSERT(sizeClass.blockSize() <= subregionSize);

        size_t entry = location >> log2SubregionSize;

        if (sizeClass.blockSize() < subregionSize) {
            BitField bit = 1ull << entry;
            m_suballocations[entry].free(location & (subregionSize - 1), sizeClass);
            // Check if the suballocation is now empty.
            if (m_suballocations[entry].isEmpty())
                m_hasSuballocation &= ~bit;
            // No need to check, it clearly isn't full any more!
            m_full &= ~bit;
        } else {
            size_t count = sizeClass.blockCount();
            BitField mask = ((1ull << count) - 1) << entry;
            ASSERT((m_full & mask) == mask);
            ASSERT(!(m_hasSuballocation & mask));
            m_full &= ~mask;
        }
    }

    bool isEmpty()
    {
        return !(m_full | m_hasSuballocation);
    }

    bool isFull()
    {
        return !~m_full;
    }

    static size_t size()
    {
        return regionSize;
    }

    static AllocationTableSizeClass classForSize(size_t size)
    {
        if (size < subregionSize) {
            AllocationTableSizeClass sizeClass = NextLevel::classForSize(size);
            if (sizeClass.size() < NextLevel::size())
                return sizeClass;
        }
        return AllocationTableSizeClass(size, subregionSize, log2SubregionSize);
    }

#ifndef NDEBUG
    void dump(size_t parentOffset = 0, unsigned indent = 0)
    {
        for (unsigned i = 0; i < indent; ++i)
            fprintf(stderr, "    ");
        fprintf(stderr, "%08x: [", (int)parentOffset);
        for (unsigned i = 0; i < entries; ++i) {
            BitField bit = 1ull << i;
            char c = m_hasSuballocation & bit
                ? (m_full & bit ? 'N' : 'n')
                : (m_full & bit ? 'F' : '-');
            fprintf(stderr, "%c", c);
        }
        fprintf(stderr, "]\n");

        for (unsigned i = 0; i < entries; ++i) {
            BitField bit = 1ull << i;
            size_t offset = parentOffset | (subregionSize * i);
            if (m_hasSuballocation & bit)
                m_suballocations[i].dump(offset, indent + 1);
        }
    }
#endif

private:
    NextLevel m_suballocations[entries];
    // Subregions exist in one of four states:
    // (1) empty (both bits clear)
    // (2) fully allocated as a single allocation (m_full set)
    // (3) partially allocated through suballocations (m_hasSuballocation set)
    // (4) fully allocated through suballocations (both bits set)
    BitField m_full;
    BitField m_hasSuballocation;
};

typedef AllocationTableLeaf<6> PageTables256KB;
typedef AllocationTableDirectory<PageTables256KB, 6> PageTables16MB;
typedef AllocationTableDirectory<LazyAllocationTable<PageTables16MB>, 1> PageTables32MB;
typedef AllocationTableDirectory<LazyAllocationTable<PageTables16MB>, 6> PageTables1GB;

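// The names encode the pool each table spans: 64 pages x 4KB = 256KB per
// leaf; 64 leaves = 16MB per directory; 2 lazy 16MB children = 32MB; and
// 64 lazy 16MB children = 1GB.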
#if CPU(ARM)
typedef PageTables16MB FixedVMPoolPageTables;
#elif CPU(X86_64) && !OS(LINUX)
typedef PageTables1GB FixedVMPoolPageTables;
#else
typedef PageTables32MB FixedVMPoolPageTables;
#endif

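// Reserves the entire fixed pool address range up front and hands out pieces
// of it via the page tables above; physical pages are returned to the system
// with madvise rather than munmap, so the reservation never changes shape.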
class FixedVMPoolAllocator {
public:
    FixedVMPoolAllocator()
    {
        m_base = mmap(0, FixedVMPoolPageTables::size(), INITIAL_PROTECTION_FLAGS, MMAP_FLAGS, VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY, 0);

        if (m_base == MAP_FAILED) {
#if ENABLE(INTERPRETER)
            m_base = 0;
#else
            CRASH();
#endif
        } else {
            // For simplicity, we keep all free memory in the pool in a
            // 'released' state. This means that we can simply reuse all
            // memory when allocating, without worrying about its previous
            // state, and we never have to coalesce released chunks with
            // non-released ones.
            release(m_base, FixedVMPoolPageTables::size());
        }
    }

    void* alloc(size_t size)
    {
        ASSERT(size);
        AllocationTableSizeClass sizeClass = classForSize(size);
        ASSERT(sizeClass.size());
        if (sizeClass.size() >= FixedVMPoolPageTables::size())
            CRASH();

        if (m_pages.isFull())
            CRASH();
        size_t offset = m_pages.allocate(sizeClass);
        if (offset == notFound)
            CRASH();

        void* result = offsetToPointer(offset);
        reuse(result, size);
        return result;
    }

    void free(void* pointer, size_t size)
    {
        release(pointer, size);

        ASSERT(size);
        AllocationTableSizeClass sizeClass = classForSize(size);
        ASSERT(sizeClass.size());
        ASSERT(sizeClass.size() < FixedVMPoolPageTables::size());

        m_pages.free(pointerToOffset(pointer), sizeClass);
    }

    bool isValid() const
    {
        return !!m_base;
    }

private:
    // Use madvise as appropriate to prevent freed pages from being spilled,
    // and to attempt to ensure that used memory is reported correctly.
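    // On Darwin, MADV_FREE_REUSABLE both lets the kernel reclaim the pages
    // and subtracts them from the task's memory accounting, with
    // MADV_FREE_REUSE re-accounting them on reuse; MADV_FREE and
    // MADV_DONTNEED only permit reclamation, so those paths need no
    // reuse-side call.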
#if HAVE(MADV_FREE_REUSE)
    void release(void* position, size_t size)
    {
        while (madvise(position, size, MADV_FREE_REUSABLE) == -1 && errno == EAGAIN) { }
    }

    void reuse(void* position, size_t size)
    {
        while (madvise(position, size, MADV_FREE_REUSE) == -1 && errno == EAGAIN) { }
    }
#elif HAVE(MADV_FREE)
    void release(void* position, size_t size)
    {
        while (madvise(position, size, MADV_FREE) == -1 && errno == EAGAIN) { }
    }

    void reuse(void*, size_t) { }
#elif HAVE(MADV_DONTNEED)
    void release(void* position, size_t size)
    {
        while (madvise(position, size, MADV_DONTNEED) == -1 && errno == EAGAIN) { }
    }

    void reuse(void*, size_t) { }
#else
    void release(void*, size_t) { }
    void reuse(void*, size_t) { }
#endif

    AllocationTableSizeClass classForSize(size_t size)
    {
        return FixedVMPoolPageTables::classForSize(size);
    }

    void* offsetToPointer(size_t offset)
    {
        return reinterpret_cast<void*>(reinterpret_cast<intptr_t>(m_base) + offset);
    }

    size_t pointerToOffset(void* pointer)
    {
        return reinterpret_cast<intptr_t>(pointer) - reinterpret_cast<intptr_t>(m_base);
    }

    void* m_base;
    FixedVMPoolPageTables m_pages;
};

void ExecutableAllocator::intializePageSize()
{
    ExecutableAllocator::pageSize = getpagesize();
}

static FixedVMPoolAllocator* allocator = 0;
static size_t allocatedCount = 0;
static SpinLock spinlock = SPINLOCK_INITIALIZER;

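// The pool is created lazily, on first use, under the spinlock; either
// isValid() or systemAlloc() may be the first caller, so both check.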
bool ExecutableAllocator::isValid() const
{
    SpinLockHolder lock_holder(&spinlock);
    if (!allocator)
        allocator = new FixedVMPoolAllocator();
    return allocator->isValid();
}

ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t size)
{
    SpinLockHolder lock_holder(&spinlock);

    if (!allocator)
        allocator = new FixedVMPoolAllocator();
    ExecutablePool::Allocation alloc = {reinterpret_cast<char*>(allocator->alloc(size)), size};
    allocatedCount += size;
    return alloc;
}

void ExecutablePool::systemRelease(const ExecutablePool::Allocation& allocation)
{
    SpinLockHolder lock_holder(&spinlock);

    ASSERT(allocator);
    allocator->free(allocation.pages, allocation.size);
    allocatedCount -= allocation.size;
}

bool ExecutablePool::underMemoryPressure()
{
    // Technically we should take the spin lock here, but we don't
    // care if we get stale data. This is only really a heuristic
    // anyway.
    return allocatedCount > (FixedVMPoolPageTables::size() / 2);
}

} // namespace JSC

#endif // ENABLE(EXECUTABLE_ALLOCATOR_FIXED)