]> git.saurik.com Git - apple/javascriptcore.git/blame - heap/MarkedAllocator.cpp
JavaScriptCore-7600.1.4.16.1.tar.gz
[apple/javascriptcore.git] / heap / MarkedAllocator.cpp
CommitLineData
81345200
A
1/*
2 * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
6fe7ccc8
A
26#include "config.h"
27#include "MarkedAllocator.h"
28
81345200 29#include "DelayedReleaseScope.h"
6fe7ccc8
A
30#include "GCActivityCallback.h"
31#include "Heap.h"
93a37866 32#include "IncrementalSweeper.h"
81345200 33#include "JSCInlines.h"
93a37866 34#include "VM.h"
6fe7ccc8
A
35#include <wtf/CurrentTime.h>
36
37namespace JSC {
38
81345200 39static bool isListPagedOut(double deadline, DoublyLinkedList<MarkedBlock>& list)
6fe7ccc8
A
40{
41 unsigned itersSinceLastTimeCheck = 0;
81345200 42 MarkedBlock* block = list.head();
6fe7ccc8
A
43 while (block) {
44 block = block->next();
45 ++itersSinceLastTimeCheck;
46 if (itersSinceLastTimeCheck >= Heap::s_timeCheckResolution) {
47 double currentTime = WTF::monotonicallyIncreasingTime();
48 if (currentTime > deadline)
49 return true;
50 itersSinceLastTimeCheck = 0;
51 }
52 }
81345200
A
53 return false;
54}
6fe7ccc8 55
81345200
A
56bool MarkedAllocator::isPagedOut(double deadline)
57{
58 if (isListPagedOut(deadline, m_blockList))
59 return true;
6fe7ccc8
A
60 return false;
61}
62
// Refills m_freeList by sweeping blocks and pops one cell of at least |bytes|
// from it. Returns 0 (and clears m_currentBlock) if no swept block yields a
// usable free cell.
inline void* MarkedAllocator::tryAllocateHelper(size_t bytes)
{
    // We need a while loop to check the free list because the DelayedReleaseScope
    // could cause arbitrary code to execute and exhaust the free list that we
    // thought had elements in it.
    while (!m_freeList.head) {
        DelayedReleaseScope delayedReleaseScope(*m_markedSpace);
        if (m_currentBlock) {
            // The previous block's free list has been consumed; tell it so and
            // move the sweep cursor past it before scanning for a new block.
            ASSERT(m_currentBlock == m_nextBlockToSweep);
            m_currentBlock->didConsumeFreeList();
            m_nextBlockToSweep = m_currentBlock->next();
        }

        // Sweep blocks starting at the cursor until one produces a usable free
        // list. Note |block| aliases m_nextBlockToSweep, so the cursor advances
        // as we go.
        MarkedBlock* next;
        for (MarkedBlock*& block = m_nextBlockToSweep; block; block = next) {
            next = block->next();

            MarkedBlock::FreeList freeList = block->sweep(MarkedBlock::SweepToFreeList);

            // Blocks that are nearly full are retired: moved off the sweep list
            // so future slow-path allocations don't waste time re-sweeping them.
            double utilization = ((double)MarkedBlock::blockSize - (double)freeList.bytes) / (double)MarkedBlock::blockSize;
            if (utilization >= Options::minMarkedBlockUtilization()) {
                ASSERT(freeList.bytes || !freeList.head);
                m_blockList.remove(block);
                m_retiredBlocks.push(block);
                block->didRetireBlock(freeList);
                continue;
            }

            // This block's cells are too small for the request; give its free
            // list back and keep looking.
            if (bytes > block->cellSize()) {
                block->stopAllocating(freeList);
                continue;
            }

            // Found a block we can allocate from; adopt its free list.
            m_currentBlock = block;
            m_freeList = freeList;
            break;
        }

        // Ran off the end of the list without finding free cells.
        if (!m_freeList.head) {
            m_currentBlock = 0;
            return 0;
        }
    }

    ASSERT(m_freeList.head);
    void* head = tryPopFreeList(bytes);
    ASSERT(head);
    m_markedSpace->didAllocateInBlock(m_currentBlock);
    return head;
}
113
114inline void* MarkedAllocator::tryPopFreeList(size_t bytes)
115{
116 ASSERT(m_currentBlock);
117 if (bytes > m_currentBlock->cellSize())
118 return 0;
119
6fe7ccc8
A
120 MarkedBlock::FreeCell* head = m_freeList.head;
121 m_freeList.head = head->next;
6fe7ccc8
A
122 return head;
123}
81345200 124
// Attempts an allocation of |bytes|, marking the heap as mid-allocation for the
// duration. Returns 0 if no free cell could be found without growing the heap.
inline void* MarkedAllocator::tryAllocate(size_t bytes)
{
    ASSERT(!m_heap->isBusy());
    m_heap->m_operationInProgress = Allocation;
    void* result = tryAllocateHelper(bytes);

    // Due to the DelayedReleaseScope in tryAllocateHelper, some other thread might have
    // created a new block after we thought we didn't find any free cells.
    while (!result && m_currentBlock) {
        // A new block was added by another thread so try popping the free list.
        result = tryPopFreeList(bytes);
        if (result)
            break;
        // The free list was empty, so call tryAllocateHelper to do the normal sweeping stuff.
        result = tryAllocateHelper(bytes);
    }

    m_heap->m_operationInProgress = NoOperation;
    // Failure implies the allocator has no current block to allocate from.
    ASSERT(result || !m_currentBlock);
    return result;
}
81345200
A
146
147ALWAYS_INLINE void MarkedAllocator::doTestCollectionsIfNeeded()
148{
149 if (!Options::slowPathAllocsBetweenGCs())
150 return;
151
152 static unsigned allocationCount = 0;
153 if (!allocationCount) {
154 if (!m_heap->isDeferred())
155 m_heap->collectAllGarbage();
156 ASSERT(m_heap->m_operationInProgress == NoOperation);
157 }
158 if (++allocationCount >= Options::slowPathAllocsBetweenGCs())
159 allocationCount = 0;
160}
161
// Slow path taken when the inline allocation fast path finds the free list
// empty. Tries, in order: sweeping existing blocks, collecting garbage, and
// finally growing the heap with a brand-new block (which cannot fail short of
// out-of-memory, hence the final ASSERT(result)).
void* MarkedAllocator::allocateSlowCase(size_t bytes)
{
    ASSERT(m_heap->vm()->currentThreadIsHoldingAPILock());
    doTestCollectionsIfNeeded();

    ASSERT(!m_markedSpace->isIterating());
    ASSERT(!m_freeList.head);
    // m_freeList.head is null here, so this reports the bytes remaining from
    // the previously consumed free list to the heap's allocation accounting.
    m_heap->didAllocate(m_freeList.bytes);

    // First attempt: sweep existing blocks for free cells.
    void* result = tryAllocate(bytes);

    if (LIKELY(result != 0))
        return result;

    // Second attempt: collect (if the heap deems it necessary) and retry.
    if (m_heap->collectIfNecessaryOrDefer()) {
        result = tryAllocate(bytes);
        if (result)
            return result;
    }

    ASSERT(!m_heap->shouldCollect());

    // Last resort: grow the heap with a fresh block sized for this request.
    MarkedBlock* block = allocateBlock(bytes);
    ASSERT(block);
    addBlock(block);

    // A newly added empty block must satisfy the allocation.
    result = tryAllocate(bytes);
    ASSERT(result);
    return result;
}
93a37866
A
192
193MarkedBlock* MarkedAllocator::allocateBlock(size_t bytes)
6fe7ccc8 194{
93a37866
A
195 size_t minBlockSize = MarkedBlock::blockSize;
196 size_t minAllocationSize = WTF::roundUpToMultipleOf(WTF::pageSize(), sizeof(MarkedBlock) + bytes);
197 size_t blockSize = std::max(minBlockSize, minAllocationSize);
198
199 size_t cellSize = m_cellSize ? m_cellSize : WTF::roundUpToMultipleOf<MarkedBlock::atomSize>(bytes);
200
201 if (blockSize == MarkedBlock::blockSize)
202 return MarkedBlock::create(m_heap->blockAllocator().allocate<MarkedBlock>(), this, cellSize, m_destructorType);
203 return MarkedBlock::create(m_heap->blockAllocator().allocateCustomSize(blockSize, MarkedBlock::blockSize), this, cellSize, m_destructorType);
6fe7ccc8
A
204}
205
206void MarkedAllocator::addBlock(MarkedBlock* block)
207{
208 ASSERT(!m_currentBlock);
209 ASSERT(!m_freeList.head);
210
211 m_blockList.append(block);
81345200 212 m_nextBlockToSweep = block;
93a37866 213 m_markedSpace->didAddBlock(block);
6fe7ccc8
A
214}
215
// Detaches |block| from this allocator, patching up any allocator state that
// currently points at it before unlinking it from the block list.
void MarkedAllocator::removeBlock(MarkedBlock* block)
{
    // If we were allocating out of this block, advance past it and drop the
    // free list (its cells belong to the block being removed).
    if (m_currentBlock == block) {
        m_currentBlock = m_currentBlock->next();
        m_freeList = MarkedBlock::FreeList();
    }
    // Likewise keep the sweep cursor off the removed block.
    if (m_nextBlockToSweep == block)
        m_nextBlockToSweep = m_nextBlockToSweep->next();

    // Notify the block while it is still linked, then unlink it.
    block->willRemoveBlock();
    m_blockList.remove(block);
}
228
81345200
A
// Resets per-cycle allocation state at a collection boundary: forgets the
// current block and free list, and rewinds the sweep cursor to the start of
// the block list.
void MarkedAllocator::reset()
{
    m_lastActiveBlock = 0;
    m_currentBlock = 0;
    m_freeList = MarkedBlock::FreeList();
    // A full collection reconsiders retired (nearly-full) blocks, so fold them
    // back into the sweep list before taking its head below.
    if (m_heap->operationInProgress() == FullCollection)
        m_blockList.append(m_retiredBlocks);

    m_nextBlockToSweep = m_blockList.head();
}
239
// Functor for forEachBlock: gives each block a final chance to run finalizers
// before the allocator goes away.
struct LastChanceToFinalize : MarkedBlock::VoidFunctor {
    void operator()(MarkedBlock* block) { block->lastChanceToFinalize(); }
};
243
// Runs final finalization over every block this allocator owns. Retired blocks
// are folded back into the main list first so forEachBlock visits them too.
void MarkedAllocator::lastChanceToFinalize()
{
    m_blockList.append(m_retiredBlocks);
    LastChanceToFinalize functor;
    forEachBlock(functor);
}
250
6fe7ccc8 251} // namespace JSC