/*
 * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
27 #include "MarkedAllocator.h"
29 #include "GCActivityCallback.h"
31 #include "IncrementalSweeper.h"
32 #include "JSCInlines.h"
34 #include <wtf/CurrentTime.h>
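
// Walking a list whose pages have been swapped out is slow, so a traversal
// that blows past the deadline is treated as evidence that the list is paged
// out. The clock is only sampled every s_timeCheckResolution iterations to
// keep the check itself cheap.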
static bool isListPagedOut(double deadline, DoublyLinkedList<MarkedBlock>& list)
{
    unsigned itersSinceLastTimeCheck = 0;
    MarkedBlock* block = list.head();
    while (block) {
        block = block->next();
        ++itersSinceLastTimeCheck;
        if (itersSinceLastTimeCheck >= Heap::s_timeCheckResolution) {
            double currentTime = WTF::monotonicallyIncreasingTime();
            if (currentTime > deadline)
                return true;
            itersSinceLastTimeCheck = 0;
        }
    }
    return false;
}
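
// The allocator is considered paged out if its block list cannot be
// traversed before the deadline.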
bool MarkedAllocator::isPagedOut(double deadline)
{
    if (isListPagedOut(deadline, m_blockList))
        return true;
    return false;
}
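
// Sweep blocks lazily until one yields a usable free list. Blocks that are
// already densely utilized are retired rather than swept again, and blocks
// whose cells are too small for this request are skipped.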
inline void* MarkedAllocator::tryAllocateHelper(size_t bytes)
{
    if (m_currentBlock) {
        ASSERT(m_currentBlock == m_nextBlockToSweep);
        m_currentBlock->didConsumeFreeList();
        m_nextBlockToSweep = m_currentBlock->next();
    }

    MarkedBlock* next;
    // block is a reference to m_nextBlockToSweep, so advancing it also
    // records how far the incremental sweep has progressed.
    for (MarkedBlock*& block = m_nextBlockToSweep; block; block = next) {
        next = block->next();

        MarkedBlock::FreeList freeList = block->sweep(MarkedBlock::SweepToFreeList);

        double utilization = ((double)MarkedBlock::blockSize - (double)freeList.bytes) / (double)MarkedBlock::blockSize;
        if (utilization >= Options::minMarkedBlockUtilization()) {
            ASSERT(freeList.bytes || !freeList.head);
            m_blockList.remove(block);
            m_retiredBlocks.push(block);
            block->didRetireBlock(freeList);
            continue;
        }

        if (bytes > block->cellSize()) {
            block->stopAllocating(freeList);
            continue;
        }

        m_currentBlock = block;
        m_freeList = freeList;
        break;
    }

    if (!m_freeList.head) {
        m_currentBlock = 0;
        return 0;
    }

    ASSERT(m_freeList.head);
    void* head = tryPopFreeList(bytes);
    ASSERT(head);
    m_markedSpace->didAllocateInBlock(m_currentBlock);
    return head;
}
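
// Pop a single cell off the current free list; fails if the request does not
// fit this block's cell size.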
inline void* MarkedAllocator::tryPopFreeList(size_t bytes)
{
    ASSERT(m_currentBlock);
    if (bytes > m_currentBlock->cellSize())
        return 0;

    MarkedBlock::FreeCell* head = m_freeList.head;
    m_freeList.head = head->next;
    return head;
}
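
// Mark the heap as performing an Allocation operation for the duration of
// the sweep and allocation attempt.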
inline void* MarkedAllocator::tryAllocate(size_t bytes)
{
    ASSERT(!m_heap->isBusy());
    m_heap->m_operationInProgress = Allocation;
    void* result = tryAllocateHelper(bytes);

    m_heap->m_operationInProgress = NoOperation;
    ASSERT(result || !m_currentBlock);
    return result;
}
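
// Testing aid: when Options::slowPathAllocsBetweenGCs() is set, force a full
// collection once every N slow-path allocations.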
ALWAYS_INLINE void MarkedAllocator::doTestCollectionsIfNeeded()
{
    if (!Options::slowPathAllocsBetweenGCs())
        return;

    static unsigned allocationCount = 0;
    if (!allocationCount) {
        if (!m_heap->isDeferred())
            m_heap->collectAllGarbage();
        ASSERT(m_heap->m_operationInProgress == NoOperation);
    }
    if (++allocationCount >= Options::slowPathAllocsBetweenGCs())
        allocationCount = 0;
}
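
// Slow path: try sweeping more blocks first, then collect if the heap wants
// to, and finally fall back to allocating a brand new block.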
void* MarkedAllocator::allocateSlowCase(size_t bytes)
{
    ASSERT(m_heap->vm()->currentThreadIsHoldingAPILock());
    doTestCollectionsIfNeeded();

    ASSERT(!m_markedSpace->isIterating());
    ASSERT(!m_freeList.head);
    m_heap->didAllocate(m_freeList.bytes);

    void* result = tryAllocate(bytes);

    if (LIKELY(result != 0))
        return result;

    if (m_heap->collectIfNecessaryOrDefer()) {
        result = tryAllocate(bytes);
        if (result)
            return result;
    }

    ASSERT(!m_heap->shouldCollect());

    MarkedBlock* block = allocateBlock(bytes);
    ASSERT(block);
    addBlock(block);

    result = tryAllocate(bytes);
    ASSERT(result);
    return result;
}
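
// Blocks are normally MarkedBlock::blockSize bytes, but an oversize cell gets
// a dedicated block rounded up to a whole number of pages.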
MarkedBlock* MarkedAllocator::allocateBlock(size_t bytes)
{
    size_t minBlockSize = MarkedBlock::blockSize;
    size_t minAllocationSize = WTF::roundUpToMultipleOf(WTF::pageSize(), sizeof(MarkedBlock) + bytes);
    size_t blockSize = std::max(minBlockSize, minAllocationSize);

    size_t cellSize = m_cellSize ? m_cellSize : WTF::roundUpToMultipleOf<MarkedBlock::atomSize>(bytes);

    return MarkedBlock::create(this, blockSize, cellSize, m_needsDestruction);
}
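
// A new block becomes the next sweep target so the following allocation
// attempt finds its free list immediately.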
void MarkedAllocator::addBlock(MarkedBlock* block)
{
    ASSERT(!m_currentBlock);
    ASSERT(!m_freeList.head);

    m_blockList.append(block);
    m_nextBlockToSweep = block;
    m_markedSpace->didAddBlock(block);
}
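
// Detach a block from this allocator, fixing up the current-block and
// next-block-to-sweep cursors if they point at it.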
void MarkedAllocator::removeBlock(MarkedBlock* block)
{
    if (m_currentBlock == block) {
        m_currentBlock = m_currentBlock->next();
        m_freeList = MarkedBlock::FreeList();
    }
    if (m_nextBlockToSweep == block)
        m_nextBlockToSweep = m_nextBlockToSweep->next();

    block->willRemoveBlock();
    m_blockList.remove(block);
}
void MarkedAllocator::reset()
{
    m_lastActiveBlock = 0;
    m_currentBlock = 0;
    m_freeList = MarkedBlock::FreeList();
    if (m_heap->operationInProgress() == FullCollection)
        m_blockList.append(m_retiredBlocks);

    m_nextBlockToSweep = m_blockList.head();
}
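
// On VM shutdown, give every block, including retired ones, a last chance to
// run destructors.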
struct LastChanceToFinalize : MarkedBlock::VoidFunctor {
    void operator()(MarkedBlock* block) { block->lastChanceToFinalize(); }
};

void MarkedAllocator::lastChanceToFinalize()
{
    m_blockList.append(m_retiredBlocks);
    LastChanceToFinalize functor;
    forEachBlock(functor);
}

} // namespace JSC