/*
 * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "MarkedAllocator.h"

#include "DelayedReleaseScope.h"
#include "GCActivityCallback.h"
#include "Heap.h"
#include "IncrementalSweeper.h"
#include "JSCInlines.h"
#include "VM.h"
#include <wtf/CurrentTime.h>

namespace JSC {

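// Walks a block list and reports whether the walk overran the given deadline.
// The clock is only consulted every s_timeCheckResolution iterations to keep
// the timing overhead low; a walk that is this slow is treated as a sign that
// the blocks have been paged out.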
static bool isListPagedOut(double deadline, DoublyLinkedList<MarkedBlock>& list)
{
    unsigned itersSinceLastTimeCheck = 0;
    MarkedBlock* block = list.head();
    while (block) {
        block = block->next();
        ++itersSinceLastTimeCheck;
        if (itersSinceLastTimeCheck >= Heap::s_timeCheckResolution) {
            double currentTime = WTF::monotonicallyIncreasingTime();
            if (currentTime > deadline)
                return true;
            itersSinceLastTimeCheck = 0;
        }
    }
    return false;
}

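// Returns true if walking this allocator's block list blows past the deadline.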
bool MarkedAllocator::isPagedOut(double deadline)
{
    if (isListPagedOut(deadline, m_blockList))
        return true;
    return false;
}

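// Slow-path helper: if the current free list is empty, sweep blocks starting at
// m_nextBlockToSweep until one yields a usable free list. Blocks that are already
// highly utilized (>= Options::minMarkedBlockUtilization()) are retired so they
// are not re-swept, and blocks whose cell size is too small for this request are
// skipped. Returns 0 if no existing block can satisfy the allocation.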
inline void* MarkedAllocator::tryAllocateHelper(size_t bytes)
{
    // We need a while loop to check the free list because the DelayedReleaseScope
    // could cause arbitrary code to execute and exhaust the free list that we
    // thought had elements in it.
    while (!m_freeList.head) {
        DelayedReleaseScope delayedReleaseScope(*m_markedSpace);
        if (m_currentBlock) {
            ASSERT(m_currentBlock == m_nextBlockToSweep);
            m_currentBlock->didConsumeFreeList();
            m_nextBlockToSweep = m_currentBlock->next();
        }

        MarkedBlock* next;
        for (MarkedBlock*& block = m_nextBlockToSweep; block; block = next) {
            next = block->next();

            MarkedBlock::FreeList freeList = block->sweep(MarkedBlock::SweepToFreeList);

            double utilization = ((double)MarkedBlock::blockSize - (double)freeList.bytes) / (double)MarkedBlock::blockSize;
            if (utilization >= Options::minMarkedBlockUtilization()) {
                ASSERT(freeList.bytes || !freeList.head);
                m_blockList.remove(block);
                m_retiredBlocks.push(block);
                block->didRetireBlock(freeList);
                continue;
            }

            if (bytes > block->cellSize()) {
                block->stopAllocating(freeList);
                continue;
            }

            m_currentBlock = block;
            m_freeList = freeList;
            break;
        }

        if (!m_freeList.head) {
            m_currentBlock = 0;
            return 0;
        }
    }

    ASSERT(m_freeList.head);
    void* head = tryPopFreeList(bytes);
    ASSERT(head);
    m_markedSpace->didAllocateInBlock(m_currentBlock);
    return head;
}

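// Pops one cell off the current block's free list, or returns 0 if the request
// is too large for this block's cell size.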
inline void* MarkedAllocator::tryPopFreeList(size_t bytes)
{
    ASSERT(m_currentBlock);
    if (bytes > m_currentBlock->cellSize())
        return 0;

    MarkedBlock::FreeCell* head = m_freeList.head;
    m_freeList.head = head->next;
    return head;
}

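// Marks the heap as being inside an allocation while the helper runs. The retry
// loop below covers the case where the DelayedReleaseScope inside the helper let
// other code run and change the allocator's state underneath us.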
inline void* MarkedAllocator::tryAllocate(size_t bytes)
{
    ASSERT(!m_heap->isBusy());
    m_heap->m_operationInProgress = Allocation;
    void* result = tryAllocateHelper(bytes);

    // Due to the DelayedReleaseScope in tryAllocateHelper, some other thread might have
    // created a new block after we thought we didn't find any free cells.
    while (!result && m_currentBlock) {
        // A new block was added by another thread so try popping the free list.
        result = tryPopFreeList(bytes);
        if (result)
            break;
        // The free list was empty, so call tryAllocateHelper to do the normal sweeping stuff.
        result = tryAllocateHelper(bytes);
    }

    m_heap->m_operationInProgress = NoOperation;
    ASSERT(result || !m_currentBlock);
    return result;
}

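// Test hook: when Options::slowPathAllocsBetweenGCs() is set, force a full
// collection every N slow-path allocations to shake out GC bugs.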
ALWAYS_INLINE void MarkedAllocator::doTestCollectionsIfNeeded()
{
    if (!Options::slowPathAllocsBetweenGCs())
        return;

    static unsigned allocationCount = 0;
    if (!allocationCount) {
        if (!m_heap->isDeferred())
            m_heap->collectAllGarbage();
        ASSERT(m_heap->m_operationInProgress == NoOperation);
    }
    if (++allocationCount >= Options::slowPathAllocsBetweenGCs())
        allocationCount = 0;
}

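// Called from the allocation fast path when the free list is empty. Tries to
// allocate by sweeping existing blocks, then by collecting if the heap wants a
// GC, and finally by allocating a brand new block, which must succeed.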
void* MarkedAllocator::allocateSlowCase(size_t bytes)
{
    ASSERT(m_heap->vm()->currentThreadIsHoldingAPILock());
    doTestCollectionsIfNeeded();

    ASSERT(!m_markedSpace->isIterating());
    ASSERT(!m_freeList.head);
    m_heap->didAllocate(m_freeList.bytes);

    void* result = tryAllocate(bytes);

    if (LIKELY(result != 0))
        return result;

    if (m_heap->collectIfNecessaryOrDefer()) {
        result = tryAllocate(bytes);
        if (result)
            return result;
    }

    ASSERT(!m_heap->shouldCollect());

    MarkedBlock* block = allocateBlock(bytes);
    ASSERT(block);
    addBlock(block);

    result = tryAllocate(bytes);
    ASSERT(result);
    return result;
}

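// Allocates backing storage for a new block. Normal requests reuse the fixed
// MarkedBlock::blockSize allocation from the block allocator; oversized cells
// get a custom-sized, page-rounded allocation instead.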
MarkedBlock* MarkedAllocator::allocateBlock(size_t bytes)
{
    size_t minBlockSize = MarkedBlock::blockSize;
    size_t minAllocationSize = WTF::roundUpToMultipleOf(WTF::pageSize(), sizeof(MarkedBlock) + bytes);
    size_t blockSize = std::max(minBlockSize, minAllocationSize);

    size_t cellSize = m_cellSize ? m_cellSize : WTF::roundUpToMultipleOf<MarkedBlock::atomSize>(bytes);

    if (blockSize == MarkedBlock::blockSize)
        return MarkedBlock::create(m_heap->blockAllocator().allocate<MarkedBlock>(), this, cellSize, m_destructorType);
    return MarkedBlock::create(m_heap->blockAllocator().allocateCustomSize(blockSize, MarkedBlock::blockSize), this, cellSize, m_destructorType);
}

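// Adds a freshly allocated block to this allocator and makes it the next block
// to sweep, so the following allocation attempt picks it up.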
void MarkedAllocator::addBlock(MarkedBlock* block)
{
    ASSERT(!m_currentBlock);
    ASSERT(!m_freeList.head);

    m_blockList.append(block);
    m_nextBlockToSweep = block;
    m_markedSpace->didAddBlock(block);
}

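// Detaches a block from this allocator, dropping the cached free list if the
// block was the one we were allocating out of.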
void MarkedAllocator::removeBlock(MarkedBlock* block)
{
    if (m_currentBlock == block) {
        m_currentBlock = m_currentBlock->next();
        m_freeList = MarkedBlock::FreeList();
    }
    if (m_nextBlockToSweep == block)
        m_nextBlockToSweep = m_nextBlockToSweep->next();

    block->willRemoveBlock();
    m_blockList.remove(block);
}

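// Resets per-cycle allocation state. During a full collection the retired blocks
// are appended back onto the block list so they will be swept again, and sweeping
// restarts from the head of the list.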
void MarkedAllocator::reset()
{
    m_lastActiveBlock = 0;
    m_currentBlock = 0;
    m_freeList = MarkedBlock::FreeList();
    if (m_heap->operationInProgress() == FullCollection)
        m_blockList.append(m_retiredBlocks);

    m_nextBlockToSweep = m_blockList.head();
}

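// Functor used below to give every block a last chance to finalize its cells
// before the allocator goes away.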
struct LastChanceToFinalize : MarkedBlock::VoidFunctor {
    void operator()(MarkedBlock* block) { block->lastChanceToFinalize(); }
};

void MarkedAllocator::lastChanceToFinalize()
{
    m_blockList.append(m_retiredBlocks);
    LastChanceToFinalize functor;
    forEachBlock(functor);
}

} // namespace JSC