heap/MarkedAllocator.cpp (apple/javascriptcore.git, JavaScriptCore-7601.1.46.3)

/*
 * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "MarkedAllocator.h"

#include "GCActivityCallback.h"
#include "Heap.h"
#include "IncrementalSweeper.h"
#include "JSCInlines.h"
#include "VM.h"
#include <wtf/CurrentTime.h>

namespace JSC {

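// Walks |list|, consulting the clock only once every Heap::s_timeCheckResolution
// iterations to keep the check cheap. If merely traversing the list blows past
// |deadline|, the blocks are presumably paged out.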
static bool isListPagedOut(double deadline, DoublyLinkedList<MarkedBlock>& list)
{
    unsigned itersSinceLastTimeCheck = 0;
    MarkedBlock* block = list.head();
    while (block) {
        block = block->next();
        ++itersSinceLastTimeCheck;
        if (itersSinceLastTimeCheck >= Heap::s_timeCheckResolution) {
            double currentTime = WTF::monotonicallyIncreasingTime();
            if (currentTime > deadline)
                return true;
            itersSinceLastTimeCheck = 0;
        }
    }
    return false;
}

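// Lets callers ask whether this allocator's blocks appear to be paged out,
// using the deadline heuristic above.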
bool MarkedAllocator::isPagedOut(double deadline)
{
    return isListPagedOut(deadline, m_blockList);
}

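// Core of the lazy-sweep allocator: finish off the current block's free list,
// then sweep successive blocks looking for a new one. Blocks that stay nearly
// full (utilization at or above Options::minMarkedBlockUtilization()) are
// retired rather than re-swept; blocks whose cells are too small for |bytes|
// are skipped. Returns 0 if no swept block yields a free cell.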
inline void* MarkedAllocator::tryAllocateHelper(size_t bytes)
{
    if (m_currentBlock) {
        ASSERT(m_currentBlock == m_nextBlockToSweep);
        m_currentBlock->didConsumeFreeList();
        m_nextBlockToSweep = m_currentBlock->next();
    }

    MarkedBlock* next;
    for (MarkedBlock*& block = m_nextBlockToSweep; block; block = next) {
        next = block->next();

        MarkedBlock::FreeList freeList = block->sweep(MarkedBlock::SweepToFreeList);

        double utilization = ((double)MarkedBlock::blockSize - (double)freeList.bytes) / (double)MarkedBlock::blockSize;
        if (utilization >= Options::minMarkedBlockUtilization()) {
            ASSERT(freeList.bytes || !freeList.head);
            m_blockList.remove(block);
            m_retiredBlocks.push(block);
            block->didRetireBlock(freeList);
            continue;
        }

        if (bytes > block->cellSize()) {
            block->stopAllocating(freeList);
            continue;
        }

        m_currentBlock = block;
        m_freeList = freeList;
        break;
    }

    if (!m_freeList.head) {
        m_currentBlock = 0;
        return 0;
    }

    ASSERT(m_freeList.head);
    void* head = tryPopFreeList(bytes);
    ASSERT(head);
    m_markedSpace->didAllocateInBlock(m_currentBlock);
    return head;
}

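// Pops one cell off the current free list, or returns 0 if this block's cells
// are too small for the request.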
inline void* MarkedAllocator::tryPopFreeList(size_t bytes)
{
    ASSERT(m_currentBlock);
    if (bytes > m_currentBlock->cellSize())
        return 0;

    MarkedBlock::FreeCell* head = m_freeList.head;
    m_freeList.head = head->next;
    return head;
}

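// Wraps tryAllocateHelper(), marking the heap as busy with an Allocation
// operation for the duration.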
inline void* MarkedAllocator::tryAllocate(size_t bytes)
{
    ASSERT(!m_heap->isBusy());
    m_heap->m_operationInProgress = Allocation;
    void* result = tryAllocateHelper(bytes);

    m_heap->m_operationInProgress = NoOperation;
    ASSERT(result || !m_currentBlock);
    return result;
}

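// Test-only knob: when Options::slowPathAllocsBetweenGCs() is set, force a
// full collection every N slow-path allocations to shake out GC bugs.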
ALWAYS_INLINE void MarkedAllocator::doTestCollectionsIfNeeded()
{
    if (!Options::slowPathAllocsBetweenGCs())
        return;

    static unsigned allocationCount = 0;
    if (!allocationCount) {
        if (!m_heap->isDeferred())
            m_heap->collectAllGarbage();
        ASSERT(m_heap->m_operationInProgress == NoOperation);
    }
    if (++allocationCount >= Options::slowPathAllocsBetweenGCs())
        allocationCount = 0;
}

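// Slow path taken when the free list is empty: sweep for a free cell, collect
// (or defer) if the heap wants it, and as a last resort allocate a fresh
// block, which is guaranteed to satisfy the request.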
void* MarkedAllocator::allocateSlowCase(size_t bytes)
{
    ASSERT(m_heap->vm()->currentThreadIsHoldingAPILock());
    doTestCollectionsIfNeeded();

    ASSERT(!m_markedSpace->isIterating());
    ASSERT(!m_freeList.head);
    m_heap->didAllocate(m_freeList.bytes);

    void* result = tryAllocate(bytes);

    if (LIKELY(result != 0))
        return result;

    if (m_heap->collectIfNecessaryOrDefer()) {
        result = tryAllocate(bytes);
        if (result)
            return result;
    }

    ASSERT(!m_heap->shouldCollect());

    MarkedBlock* block = allocateBlock(bytes);
    ASSERT(block);
    addBlock(block);

    result = tryAllocate(bytes);
    ASSERT(result);
    return result;
}

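// Allocates a new MarkedBlock sized for |bytes|: at least
// MarkedBlock::blockSize, rounded up to a whole number of pages when a single
// oversize cell needs more room than that.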
MarkedBlock* MarkedAllocator::allocateBlock(size_t bytes)
{
    size_t minBlockSize = MarkedBlock::blockSize;
    size_t minAllocationSize = WTF::roundUpToMultipleOf(WTF::pageSize(), sizeof(MarkedBlock) + bytes);
    size_t blockSize = std::max(minBlockSize, minAllocationSize);

    size_t cellSize = m_cellSize ? m_cellSize : WTF::roundUpToMultipleOf<MarkedBlock::atomSize>(bytes);

    return MarkedBlock::create(this, blockSize, cellSize, m_needsDestruction);
}

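// Appends a freshly allocated block and makes it the next sweep candidate.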
void MarkedAllocator::addBlock(MarkedBlock* block)
{
    ASSERT(!m_currentBlock);
    ASSERT(!m_freeList.head);

    m_blockList.append(block);
    m_nextBlockToSweep = block;
    m_markedSpace->didAddBlock(block);
}

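// Unlinks |block| from this allocator, fixing up the current-block and
// next-to-sweep cursors if either points at it.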
void MarkedAllocator::removeBlock(MarkedBlock* block)
{
    if (m_currentBlock == block) {
        m_currentBlock = m_currentBlock->next();
        m_freeList = MarkedBlock::FreeList();
    }
    if (m_nextBlockToSweep == block)
        m_nextBlockToSweep = m_nextBlockToSweep->next();

    block->willRemoveBlock();
    m_blockList.remove(block);
}

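// Drops the cached free list and sweep cursors; after a full collection,
// retired blocks rejoin the block list so they become sweepable again.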
void MarkedAllocator::reset()
{
    m_lastActiveBlock = 0;
    m_currentBlock = 0;
    m_freeList = MarkedBlock::FreeList();
    if (m_heap->operationInProgress() == FullCollection)
        m_blockList.append(m_retiredBlocks);

    m_nextBlockToSweep = m_blockList.head();
}

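// Teardown path: fold retired blocks back in and give every block a final
// chance to finalize its cells.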
struct LastChanceToFinalize : MarkedBlock::VoidFunctor {
    void operator()(MarkedBlock* block) { block->lastChanceToFinalize(); }
};

void MarkedAllocator::lastChanceToFinalize()
{
    m_blockList.append(m_retiredBlocks);
    LastChanceToFinalize functor;
    forEachBlock(functor);
}

} // namespace JSC