X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/blobdiff_plain/6fe7ccc865dc7d7541b93c5bcaf6368d2c98a174..ed1e77d3adeb83d26fd1dfb16dd84cabdcefd250:/heap/MarkedAllocator.cpp

diff --git a/heap/MarkedAllocator.cpp b/heap/MarkedAllocator.cpp
index 2135e99..2dbf8ae 100644
--- a/heap/MarkedAllocator.cpp
+++ b/heap/MarkedAllocator.cpp
@@ -1,17 +1,44 @@
+/*
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
 #include "config.h"
 #include "MarkedAllocator.h"
 
 #include "GCActivityCallback.h"
 #include "Heap.h"
-#include "JSGlobalData.h"
+#include "IncrementalSweeper.h"
+#include "JSCInlines.h"
+#include "VM.h"
 #include <wtf/CurrentTime.h>
 
 namespace JSC {
 
-bool MarkedAllocator::isPagedOut(double deadline)
+static bool isListPagedOut(double deadline, DoublyLinkedList<MarkedBlock>& list)
 {
     unsigned itersSinceLastTimeCheck = 0;
-    HeapBlock<MarkedBlock>* block = m_blockList.head();
+    MarkedBlock* block = list.head();
     while (block) {
         block = block->next();
         ++itersSinceLastTimeCheck;
@@ -22,85 +49,138 @@ bool MarkedAllocator::isPagedOut(double deadline)
             itersSinceLastTimeCheck = 0;
         }
     }
+    return false;
+}
+
+bool MarkedAllocator::isPagedOut(double deadline)
+{
+    if (isListPagedOut(deadline, m_blockList))
+        return true;
     return false;
 }
 
-inline void* MarkedAllocator::tryAllocateHelper()
+inline void* MarkedAllocator::tryAllocateHelper(size_t bytes)
 {
-    if (!m_freeList.head) {
-        for (MarkedBlock*& block = m_currentBlock; block; block = static_cast<MarkedBlock*>(block->next())) {
-            m_freeList = block->sweep(MarkedBlock::SweepToFreeList);
-            if (m_freeList.head)
-                break;
-            block->didConsumeFreeList();
-        }
+    if (m_currentBlock) {
+        ASSERT(m_currentBlock == m_nextBlockToSweep);
+        m_currentBlock->didConsumeFreeList();
+        m_nextBlockToSweep = m_currentBlock->next();
+    }
+
+    MarkedBlock* next;
+    for (MarkedBlock*& block = m_nextBlockToSweep; block; block = next) {
+        next = block->next();
+
+        MarkedBlock::FreeList freeList = block->sweep(MarkedBlock::SweepToFreeList);
 
-        if (!m_freeList.head)
-            return 0;
+        double utilization = ((double)MarkedBlock::blockSize - (double)freeList.bytes) / (double)MarkedBlock::blockSize;
+        if (utilization >= Options::minMarkedBlockUtilization()) {
+            ASSERT(freeList.bytes || !freeList.head);
+            m_blockList.remove(block);
+            m_retiredBlocks.push(block);
+            block->didRetireBlock(freeList);
+            continue;
+        }
+
+        if (bytes > block->cellSize()) {
+            block->stopAllocating(freeList);
+            continue;
+        }
+
+        m_currentBlock = block;
+        m_freeList = freeList;
+        break;
     }
 
+    if (!m_freeList.head) {
+        m_currentBlock = 0;
+        return 0;
+    }
+
+    ASSERT(m_freeList.head);
+    void* head = tryPopFreeList(bytes);
+    ASSERT(head);
+    m_markedSpace->didAllocateInBlock(m_currentBlock);
+    return head;
+}
+
+inline void* MarkedAllocator::tryPopFreeList(size_t bytes)
+{
+    ASSERT(m_currentBlock);
+    if (bytes > m_currentBlock->cellSize())
+        return 0;
+
     MarkedBlock::FreeCell* head = m_freeList.head;
     m_freeList.head = head->next;
-    ASSERT(head);
     return head;
 }
-
-inline void* MarkedAllocator::tryAllocate()
+
+inline void* MarkedAllocator::tryAllocate(size_t bytes)
 {
     ASSERT(!m_heap->isBusy());
     m_heap->m_operationInProgress = Allocation;
-    void* result = tryAllocateHelper();
+    void* result = tryAllocateHelper(bytes);
+    m_heap->m_operationInProgress = NoOperation;
+    ASSERT(result || !m_currentBlock);
     return result;
 }
-
-void* MarkedAllocator::allocateSlowCase()
+
+ALWAYS_INLINE void MarkedAllocator::doTestCollectionsIfNeeded()
 {
-    ASSERT(m_heap->globalData()->apiLock().currentThreadIsHoldingLock());
-#if COLLECT_ON_EVERY_ALLOCATION
-    m_heap->collectAllGarbage();
-    ASSERT(m_heap->m_operationInProgress == NoOperation);
-#endif
-
+    if (!Options::slowPathAllocsBetweenGCs())
+        return;
+
+    static unsigned allocationCount = 0;
+    if (!allocationCount) {
+        if (!m_heap->isDeferred())
+            m_heap->collectAllGarbage();
+        ASSERT(m_heap->m_operationInProgress == NoOperation);
+    }
+    if (++allocationCount >= Options::slowPathAllocsBetweenGCs())
+        allocationCount = 0;
+}
+
+void* MarkedAllocator::allocateSlowCase(size_t bytes)
+{
+    ASSERT(m_heap->vm()->currentThreadIsHoldingAPILock());
+    doTestCollectionsIfNeeded();
+
+    ASSERT(!m_markedSpace->isIterating());
     ASSERT(!m_freeList.head);
     m_heap->didAllocate(m_freeList.bytes);
 
-    void* result = tryAllocate();
+    void* result = tryAllocate(bytes);
 
     if (LIKELY(result != 0))
         return result;
 
-    if (m_heap->shouldCollect()) {
-        m_heap->collect(Heap::DoNotSweep);
-
-        result = tryAllocate();
+    if (m_heap->collectIfNecessaryOrDefer()) {
+        result = tryAllocate(bytes);
         if (result)
             return result;
     }
 
     ASSERT(!m_heap->shouldCollect());
 
-    MarkedBlock* block = allocateBlock();
+    MarkedBlock* block = allocateBlock(bytes);
     ASSERT(block);
     addBlock(block);
 
-    result = tryAllocate();
+    result = tryAllocate(bytes);
     ASSERT(result);
     return result;
 }
-
-MarkedBlock* MarkedAllocator::allocateBlock()
+
+MarkedBlock* MarkedAllocator::allocateBlock(size_t bytes)
 {
-    MarkedBlock* block = static_cast<MarkedBlock*>(m_heap->blockAllocator().allocate());
-    if (block)
-        block = MarkedBlock::recycle(block, m_heap, m_cellSize, m_cellsNeedDestruction);
-    else
-        block = MarkedBlock::create(m_heap, m_cellSize, m_cellsNeedDestruction);
-
-    m_markedSpace->didAddBlock(block);
-
-    return block;
+    size_t minBlockSize = MarkedBlock::blockSize;
+    size_t minAllocationSize = WTF::roundUpToMultipleOf(WTF::pageSize(), sizeof(MarkedBlock) + bytes);
+    size_t blockSize = std::max(minBlockSize, minAllocationSize);
+
+    size_t cellSize = m_cellSize ? m_cellSize : WTF::roundUpToMultipleOf<MarkedBlock::atomSize>(bytes);
+
+    return MarkedBlock::create(this, blockSize, cellSize, m_needsDestruction);
 }
 
 void MarkedAllocator::addBlock(MarkedBlock* block)
@@ -109,15 +189,43 @@ void MarkedAllocator::addBlock(MarkedBlock* block)
     ASSERT(!m_freeList.head);
 
     m_blockList.append(block);
-    m_currentBlock = block;
-    m_freeList = block->sweep(MarkedBlock::SweepToFreeList);
+    m_nextBlockToSweep = block;
+    m_markedSpace->didAddBlock(block);
 }
 
 void MarkedAllocator::removeBlock(MarkedBlock* block)
 {
-    if (m_currentBlock == block)
-        m_currentBlock = 0;
+    if (m_currentBlock == block) {
+        m_currentBlock = m_currentBlock->next();
+        m_freeList = MarkedBlock::FreeList();
+    }
+    if (m_nextBlockToSweep == block)
+        m_nextBlockToSweep = m_nextBlockToSweep->next();
+
+    block->willRemoveBlock();
     m_blockList.remove(block);
 }
 
+void MarkedAllocator::reset()
+{
+    m_lastActiveBlock = 0;
+    m_currentBlock = 0;
+    m_freeList = MarkedBlock::FreeList();
+    if (m_heap->operationInProgress() == FullCollection)
+        m_blockList.append(m_retiredBlocks);
+
+    m_nextBlockToSweep = m_blockList.head();
+}
+
+struct LastChanceToFinalize : MarkedBlock::VoidFunctor {
+    void operator()(MarkedBlock* block) { block->lastChanceToFinalize(); }
+};
+
+void MarkedAllocator::lastChanceToFinalize()
+{
+    m_blockList.append(m_retiredBlocks);
+    LastChanceToFinalize functor;
+    forEachBlock(functor);
+}
+
 } // namespace JSC
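
The most consequential change in this diff is block retirement in tryAllocateHelper(): after sweeping a block to a free list, the allocator computes the block's utilization and, when it is at or above Options::minMarkedBlockUtilization(), moves the block from m_blockList to m_retiredBlocks so it stops being re-swept on every slow-path allocation. What follows is a minimal, hypothetical sketch of just that predicate; kBlockSize and kMinUtilization are invented stand-ins for MarkedBlock::blockSize and Options::minMarkedBlockUtilization(), not JSC names.

// Sketch of the retirement test from tryAllocateHelper() above.
// A block that is still mostly full after a sweep is retired: re-sweeping
// it would cost a full block scan for almost no reusable cells.
#include <cstddef>
#include <cstdio>

static const size_t kBlockSize = 16 * 1024; // assumed block footprint
static const double kMinUtilization = 0.9;  // assumed retirement threshold

static bool shouldRetire(size_t freeBytes)
{
    double utilization =
        (double(kBlockSize) - double(freeBytes)) / double(kBlockSize);
    return utilization >= kMinUtilization;
}

int main()
{
    printf("%d\n", shouldRetire(512));      // 1: only ~3% free, retire the block
    printf("%d\n", shouldRetire(8 * 1024)); // 0: half free, keep allocating from it
    return 0;
}

Retirement is not permanent: per the reset() hunk above, the retired list is appended back onto m_blockList when a FullCollection is in progress, since a full collection may have emptied cells in those blocks.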
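
The new tryPopFreeList() helper makes the fast path explicit: a sweep threads each dead cell through its own storage as a singly linked list, so allocating is a single pointer detach. Below is a self-contained, hypothetical sketch of that shape; FreeCell and FreeList mirror the roles of MarkedBlock::FreeCell and MarkedBlock::FreeList, while the toy block in main() is invented for illustration.

// Free cells live inside the block's unused memory; the list costs no
// extra space beyond one head pointer held by the allocator.
#include <cstddef>

struct FreeCell {
    FreeCell* next;
};

struct FreeList {
    FreeCell* head = nullptr;
    size_t bytes = 0; // reusable bytes discovered by the sweep
};

// Pop one cell, or nullptr when exhausted; the caller would then sweep
// the next block or allocate a fresh one, as allocateSlowCase() does.
inline void* tryPop(FreeList& freeList)
{
    FreeCell* head = freeList.head;
    if (!head)
        return nullptr;
    freeList.head = head->next;
    return head;
}

int main()
{
    alignas(FreeCell) char block[4 * sizeof(FreeCell)];
    FreeList list;
    for (int i = 3; i >= 0; --i) { // thread four toy cells onto the list
        FreeCell* cell = reinterpret_cast<FreeCell*>(block + i * sizeof(FreeCell));
        cell->next = list.head;
        list.head = cell;
    }
    while (tryPop(list)) { } // four pops succeed, the fifth returns nullptr
    return 0;
}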
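
allocateBlock(bytes) also changes from recycling fixed-size blocks to sizing the block for the request: a normal request gets the fixed MarkedBlock::blockSize, while an oversize request rounds sizeof(MarkedBlock) plus the payload up to a whole number of pages. The sketch below reproduces only that arithmetic under assumed constants; kPageSize, kHeaderSize, and kBlockSize stand in for WTF::pageSize(), sizeof(MarkedBlock), and MarkedBlock::blockSize.

// Sizing math from allocateBlock() above, with invented constants.
#include <algorithm>
#include <cstddef>
#include <cstdio>

static const size_t kBlockSize = 16 * 1024; // assumed MarkedBlock::blockSize
static const size_t kPageSize = 4 * 1024;   // assumed OS page size
static const size_t kHeaderSize = 256;      // assumed block header footprint

// Same contract as WTF's runtime roundUpToMultipleOf(divisor, x).
static size_t roundUpToMultipleOf(size_t divisor, size_t x)
{
    return ((x + divisor - 1) / divisor) * divisor;
}

static size_t blockSizeFor(size_t bytes)
{
    size_t minAllocationSize = roundUpToMultipleOf(kPageSize, kHeaderSize + bytes);
    return std::max(kBlockSize, minAllocationSize);
}

int main()
{
    printf("%zu\n", blockSizeFor(64));        // 16384: small cells share a normal block
    printf("%zu\n", blockSizeFor(20 * 1024)); // 24576: oversize cell gets a page-rounded block
    return 0;
}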