/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "CopiedSpace.h"

#include "CopiedSpaceInlines.h"
#include "GCActivityCallback.h"
#include "JSCInlines.h"
#include "Options.h"

namespace JSC {
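// CopiedSpace is the bump-allocated, copy-collected portion of the JSC heap. It
// stores variable-sized backing stores (such as object property storage) in
// CopiedBlocks, kept per generation in to-space, from-space, and oversize-block
// lists, and it moves surviving payloads between the semispaces during the copying
// phase of a collection.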
CopiedSpace::CopiedSpace(Heap* heap)
    : m_heap(heap)
    , m_inCopyingPhase(false)
    , m_shouldDoCopyPhase(false)
    , m_numberOfLoanedBlocks(0)
    , m_bytesRemovedFromOldSpaceDueToReallocation(0)
{
}

CopiedSpace::~CopiedSpace()
{
    while (!m_oldGen.toSpace->isEmpty())
        CopiedBlock::destroy(m_oldGen.toSpace->removeHead());

    while (!m_oldGen.fromSpace->isEmpty())
        CopiedBlock::destroy(m_oldGen.fromSpace->removeHead());

    while (!m_oldGen.oversizeBlocks.isEmpty())
        CopiedBlock::destroy(m_oldGen.oversizeBlocks.removeHead());

    while (!m_newGen.toSpace->isEmpty())
        CopiedBlock::destroy(m_newGen.toSpace->removeHead());

    while (!m_newGen.fromSpace->isEmpty())
        CopiedBlock::destroy(m_newGen.fromSpace->removeHead());

    while (!m_newGen.oversizeBlocks.isEmpty())
        CopiedBlock::destroy(m_newGen.oversizeBlocks.removeHead());

    ASSERT(m_oldGen.toSpace->isEmpty());
    ASSERT(m_oldGen.fromSpace->isEmpty());
    ASSERT(m_oldGen.oversizeBlocks.isEmpty());
    ASSERT(m_newGen.toSpace->isEmpty());
    ASSERT(m_newGen.fromSpace->isEmpty());
    ASSERT(m_newGen.oversizeBlocks.isEmpty());
}

void CopiedSpace::init()
{
    m_oldGen.toSpace = &m_oldGen.blocks1;
    m_oldGen.fromSpace = &m_oldGen.blocks2;

    m_newGen.toSpace = &m_newGen.blocks1;
    m_newGen.fromSpace = &m_newGen.blocks2;

    allocateBlock();
}
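// Slow path taken when bump allocation in the current block fails: oversize
// requests get a dedicated block, otherwise the current block's capacity is
// reported to the Heap, a fresh block is installed, and the allocation is forced
// into it (which cannot fail in an empty block).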
CheckedBoolean CopiedSpace::tryAllocateSlowCase(size_t bytes, void** outPtr)
{
    if (isOversize(bytes))
        return tryAllocateOversize(bytes, outPtr);

    ASSERT(m_heap->vm()->currentThreadIsHoldingAPILock());
    m_heap->didAllocate(m_allocator.currentCapacity());

    allocateBlock();

    *outPtr = m_allocator.forceAllocate(bytes);
    return true;
}
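// Allocations too large for a normal CopiedBlock payload get their own block,
// sized to the request (rounded up for double alignment) and registered with the
// new generation's oversize list, its block filter, and the global block set.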
CheckedBoolean CopiedSpace::tryAllocateOversize(size_t bytes, void** outPtr)
{
    ASSERT(isOversize(bytes));

    CopiedBlock* block = CopiedBlock::create(WTF::roundUpToMultipleOf<sizeof(double)>(sizeof(CopiedBlock) + bytes));
    m_newGen.oversizeBlocks.push(block);
    m_newGen.blockFilter.add(reinterpret_cast<Bits>(block));
    m_blockSet.add(block);
    ASSERT(!block->isOld());

    CopiedAllocator allocator;
    allocator.setCurrentBlock(block);
    *outPtr = allocator.forceAllocate(bytes);
    allocator.resetCurrentBlock();

    m_heap->didAllocate(block->capacity());

    return true;
}
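// Grows an existing copied-space allocation. The bump allocator first tries to
// extend the allocation in place; failing that, a new allocation is made and the
// old contents are copied over. Requests that do not grow the allocation are
// no-ops.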
CheckedBoolean CopiedSpace::tryReallocate(void** ptr, size_t oldSize, size_t newSize)
{
    if (oldSize >= newSize)
        return true;

    void* oldPtr = *ptr;
    ASSERT(!m_heap->vm()->isInitializingObject());

    if (CopiedSpace::blockFor(oldPtr)->isOversize() || isOversize(newSize))
        return tryReallocateOversize(ptr, oldSize, newSize);

    if (m_allocator.tryReallocate(oldPtr, oldSize, newSize))
        return true;

    void* result = 0;
    if (!tryAllocate(newSize, &result)) {
        *ptr = 0;
        return false;
    }
    memcpy(result, oldPtr, oldSize);
    *ptr = result;
    return true;
}
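// Reallocation path used when the old or new size is oversize: allocate a fresh
// oversize block, copy the payload, and eagerly destroy the old block if it was
// itself oversize (see the FIXME below questioning that eagerness).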
CheckedBoolean CopiedSpace::tryReallocateOversize(void** ptr, size_t oldSize, size_t newSize)
{
    ASSERT(isOversize(oldSize) || isOversize(newSize));
    ASSERT(newSize > oldSize);

    void* oldPtr = *ptr;

    void* newPtr = 0;
    if (!tryAllocateOversize(newSize, &newPtr)) {
        *ptr = 0;
        return false;
    }

    memcpy(newPtr, oldPtr, oldSize);

    CopiedBlock* oldBlock = CopiedSpace::blockFor(oldPtr);
    if (oldBlock->isOversize()) {
        // FIXME: Eagerly deallocating the old space block probably buys more confusion than
        // value.
        // https://bugs.webkit.org/show_bug.cgi?id=144750
        if (oldBlock->isOld()) {
            m_bytesRemovedFromOldSpaceDueToReallocation += oldBlock->size();
            m_oldGen.oversizeBlocks.remove(oldBlock);
        } else
            m_newGen.oversizeBlocks.remove(oldBlock);
        m_blockSet.remove(oldBlock);
        CopiedBlock::destroy(oldBlock);
    }

    *ptr = newPtr;
    return true;
}
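// Called during the copying phase when a borrowed block has been filled. The block
// is pushed into old-generation to-space (copied survivors are promoted), a fresh
// block is handed back through |exchange| if requested, and the loaned-block count
// is decremented so doneCopying() can tell when all copying work has drained.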
void CopiedSpace::doneFillingBlock(CopiedBlock* block, CopiedBlock** exchange)
{
    ASSERT(m_inCopyingPhase);

    if (exchange)
        *exchange = allocateBlockForCopyingPhase();

    if (!block)
        return;

    if (!block->dataSize()) {
        recycleBorrowedBlock(block);
        return;
    }

    block->zeroFillWilderness();

    {
        // Always put the block into the old gen because it's being promoted!
        SpinLockHolder locker(&m_toSpaceLock);
        m_oldGen.toSpace->push(block);
        m_blockSet.add(block);
        m_oldGen.blockFilter.add(reinterpret_cast<Bits>(block));
    }

    {
        MutexLocker locker(m_loanedBlocksLock);
        ASSERT(m_numberOfLoanedBlocks > 0);
        ASSERT(m_inCopyingPhase);
        m_numberOfLoanedBlocks--;
        if (!m_numberOfLoanedBlocks)
            m_loanedBlocksCondition.signal();
    }
}
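// At the start of a full collection, old-generation blocks get their per-collection
// state reset via didSurviveGC(), while new-generation blocks are asserted (debug
// builds only) to carry no live bytes yet.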
void CopiedSpace::didStartFullCollection()
{
    ASSERT(heap()->operationInProgress() == FullCollection);
    ASSERT(m_oldGen.fromSpace->isEmpty());
    ASSERT(m_newGen.fromSpace->isEmpty());

#ifndef NDEBUG
    for (CopiedBlock* block = m_newGen.toSpace->head(); block; block = block->next())
        ASSERT(!block->liveBytes());

    for (CopiedBlock* block = m_newGen.oversizeBlocks.head(); block; block = block->next())
        ASSERT(!block->liveBytes());
#endif

    for (CopiedBlock* block = m_oldGen.toSpace->head(); block; block = block->next())
        block->didSurviveGC();

    for (CopiedBlock* block = m_oldGen.oversizeBlocks.head(); block; block = block->next())
        block->didSurviveGC();
}
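// Ends the copying phase: wait for every loaned block to be returned, then flip the
// semispace of the generation being collected. Blocks still sitting in from-space
// were not evacuated, so they survive in place and move back to to-space. After an
// Eden collection, the whole new generation is appended to the old generation.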
void CopiedSpace::doneCopying()
{
    {
        MutexLocker locker(m_loanedBlocksLock);
        while (m_numberOfLoanedBlocks > 0)
            m_loanedBlocksCondition.wait(m_loanedBlocksLock);
    }

    ASSERT(m_inCopyingPhase == m_shouldDoCopyPhase);
    m_inCopyingPhase = false;

    DoublyLinkedList<CopiedBlock>* toSpace;
    DoublyLinkedList<CopiedBlock>* fromSpace;
    TinyBloomFilter* blockFilter;
    if (heap()->operationInProgress() == FullCollection) {
        toSpace = m_oldGen.toSpace;
        fromSpace = m_oldGen.fromSpace;
        blockFilter = &m_oldGen.blockFilter;
    } else {
        toSpace = m_newGen.toSpace;
        fromSpace = m_newGen.fromSpace;
        blockFilter = &m_newGen.blockFilter;
    }

    while (!fromSpace->isEmpty()) {
        CopiedBlock* block = fromSpace->removeHead();
        // We don't add the block to the blockSet because it was never removed.
        ASSERT(m_blockSet.contains(block));
        blockFilter->add(reinterpret_cast<Bits>(block));
        block->didSurviveGC();
        toSpace->push(block);
    }

    if (heap()->operationInProgress() == EdenCollection) {
        m_oldGen.toSpace->append(*m_newGen.toSpace);
        m_oldGen.oversizeBlocks.append(m_newGen.oversizeBlocks);
        m_oldGen.blockFilter.add(m_newGen.blockFilter);
        m_newGen.blockFilter.reset();
    }

    ASSERT(m_newGen.toSpace->isEmpty());
    ASSERT(m_newGen.fromSpace->isEmpty());
    ASSERT(m_newGen.oversizeBlocks.isEmpty());

    allocateBlock();

    m_shouldDoCopyPhase = false;
}

size_t CopiedSpace::size()
{
    size_t calculatedSize = 0;

    for (CopiedBlock* block = m_oldGen.toSpace->head(); block; block = block->next())
        calculatedSize += block->size();

    for (CopiedBlock* block = m_oldGen.fromSpace->head(); block; block = block->next())
        calculatedSize += block->size();

    for (CopiedBlock* block = m_oldGen.oversizeBlocks.head(); block; block = block->next())
        calculatedSize += block->size();

    for (CopiedBlock* block = m_newGen.toSpace->head(); block; block = block->next())
        calculatedSize += block->size();

    for (CopiedBlock* block = m_newGen.fromSpace->head(); block; block = block->next())
        calculatedSize += block->size();

    for (CopiedBlock* block = m_newGen.oversizeBlocks.head(); block; block = block->next())
        calculatedSize += block->size();

    return calculatedSize;
}

size_t CopiedSpace::capacity()
{
    size_t calculatedCapacity = 0;

    for (CopiedBlock* block = m_oldGen.toSpace->head(); block; block = block->next())
        calculatedCapacity += block->capacity();

    for (CopiedBlock* block = m_oldGen.fromSpace->head(); block; block = block->next())
        calculatedCapacity += block->capacity();

    for (CopiedBlock* block = m_oldGen.oversizeBlocks.head(); block; block = block->next())
        calculatedCapacity += block->capacity();

    for (CopiedBlock* block = m_newGen.toSpace->head(); block; block = block->next())
        calculatedCapacity += block->capacity();

    for (CopiedBlock* block = m_newGen.fromSpace->head(); block; block = block->next())
        calculatedCapacity += block->capacity();

    for (CopiedBlock* block = m_newGen.oversizeBlocks.head(); block; block = block->next())
        calculatedCapacity += block->capacity();

    return calculatedCapacity;
}
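// Helpers for estimating whether copied-space memory has been paged out: walking a
// block list touches each block header, so a traversal that cannot finish before
// the deadline is taken to mean the list is (at least partly) paged out.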
static bool isBlockListPagedOut(double deadline, DoublyLinkedList<CopiedBlock>* list)
{
    unsigned itersSinceLastTimeCheck = 0;
    CopiedBlock* current = list->head();
    while (current) {
        current = current->next();
        ++itersSinceLastTimeCheck;
        if (itersSinceLastTimeCheck >= Heap::s_timeCheckResolution) {
            double currentTime = WTF::monotonicallyIncreasingTime();
            if (currentTime > deadline)
                return true;
            itersSinceLastTimeCheck = 0;
        }
    }

    return false;
}

bool CopiedSpace::isPagedOut(double deadline)
{
    return isBlockListPagedOut(deadline, m_oldGen.toSpace)
        || isBlockListPagedOut(deadline, m_oldGen.fromSpace)
        || isBlockListPagedOut(deadline, &m_oldGen.oversizeBlocks)
        || isBlockListPagedOut(deadline, m_newGen.toSpace)
        || isBlockListPagedOut(deadline, m_newGen.fromSpace)
        || isBlockListPagedOut(deadline, &m_newGen.oversizeBlocks);
}

} // namespace JSC