]> git.saurik.com Git - apple/javascriptcore.git/blame - heap/CopiedSpace.cpp
JavaScriptCore-1097.3.tar.gz
[apple/javascriptcore.git] / heap / CopiedSpace.cpp
CommitLineData
6fe7ccc8
A
1/*
2 * Copyright (C) 2011 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27#include "CopiedSpace.h"
28
29#include "CopiedSpaceInlineMethods.h"
30#include "GCActivityCallback.h"
31
32namespace JSC {
33
// Constructs a copied space attached to the given heap. Both semispace
// pointers start null; init() must be called before any allocation.
CopiedSpace::CopiedSpace(Heap* heap)
    : m_heap(heap)
    , m_toSpace(0)
    , m_fromSpace(0)
    , m_inCopyingPhase(false)
    , m_numberOfLoanedBlocks(0)
{
}
42
43void CopiedSpace::init()
44{
45 m_toSpace = &m_blocks1;
46 m_fromSpace = &m_blocks2;
47
48 if (!addNewBlock())
49 CRASH();
50}
51
// Slow path for tryAllocate(): the current block could not satisfy the
// request. Oversize requests are diverted to the dedicated oversize path;
// otherwise the current block is retired and a fresh one is allocated from.
CheckedBoolean CopiedSpace::tryAllocateSlowCase(size_t bytes, void** outPtr)
{
    if (isOversize(bytes))
        return tryAllocateOversize(bytes, outPtr);

    // Must hold the API lock here — presumably because addNewBlock() may
    // trigger a collection (see getFreshBlock) — TODO confirm.
    ASSERT(m_heap->globalData()->apiLock().currentThreadIsHoldingLock());
    // Report the retiring block's whole capacity to the heap's allocation
    // accounting (used to decide when to collect).
    m_heap->didAllocate(m_allocator.currentCapacity());

    if (!addNewBlock()) {
        *outPtr = 0;
        return false;
    }
    // A fresh block always fits a non-oversize request, hence the ASSERT.
    *outPtr = m_allocator.allocate(bytes);
    ASSERT(*outPtr);
    return true;
}
68
// Allocates storage for an object too large for a normal copied block: it
// gets its own page-aligned mapping, linked into m_oversizeBlocks and
// recorded in m_oversizeFilter for fast ownership tests.
CheckedBoolean CopiedSpace::tryAllocateOversize(size_t bytes, void** outPtr)
{
    ASSERT(isOversize(bytes));

    // Block header plus payload, rounded up to a whole number of pages.
    size_t blockSize = WTF::roundUpToMultipleOf(WTF::pageSize(), sizeof(CopiedBlock) + bytes);

    PageAllocationAligned allocation = PageAllocationAligned::allocate(blockSize, WTF::pageSize(), OSAllocator::JSGCHeapPages);
    if (!static_cast<bool>(allocation)) {
        // Mapping failed; report failure with a null result.
        *outPtr = 0;
        return false;
    }

    // Construct the block header in place at the start of the new mapping.
    CopiedBlock* block = new (NotNull, allocation.base()) CopiedBlock(allocation);
    m_oversizeBlocks.push(block);
    m_oversizeFilter.add(reinterpret_cast<Bits>(block));

    *outPtr = allocateFromBlock(block, bytes);

    // Oversize allocations count toward the heap's collection trigger too.
    m_heap->didAllocate(blockSize);

    return true;
}
91
// Grows a backing store from oldSize to newSize bytes, moving it if needed.
// Shrink requests are no-ops. On failure *ptr is zeroed and false returned;
// on success *ptr points at storage holding the old contents.
CheckedBoolean CopiedSpace::tryReallocate(void** ptr, size_t oldSize, size_t newSize)
{
    if (oldSize >= newSize)
        return true;

    void* oldPtr = *ptr;
    ASSERT(!m_heap->globalData()->isInitializingObject());

    // If either size is oversize, the whole operation goes through the
    // oversize path.
    if (isOversize(oldSize) || isOversize(newSize))
        return tryReallocateOversize(ptr, oldSize, newSize);

    // Fast path: if oldPtr was the most recent allocation in the current
    // block, extend it in place by bump-allocating just the delta.
    if (m_allocator.wasLastAllocation(oldPtr, oldSize)) {
        size_t delta = newSize - oldSize;
        if (m_allocator.fitsInCurrentBlock(delta)) {
            (void)m_allocator.allocate(delta);
            return true;
        }
    }

    // Otherwise allocate new storage and copy. The old storage is simply
    // abandoned; presumably the next copying collection reclaims it — confirm.
    void* result = 0;
    if (!tryAllocate(newSize, &result)) {
        *ptr = 0;
        return false;
    }
    memcpy(result, oldPtr, oldSize);
    *ptr = result;
    return true;
}
120
// Reallocation path used when either the old or the new size is oversize.
// Always allocates a fresh oversize block, copies the old contents over,
// and (only if the old storage was itself oversize) frees the old block.
CheckedBoolean CopiedSpace::tryReallocateOversize(void** ptr, size_t oldSize, size_t newSize)
{
    ASSERT(isOversize(oldSize) || isOversize(newSize));
    ASSERT(newSize > oldSize);

    void* oldPtr = *ptr;

    void* newPtr = 0;
    if (!tryAllocateOversize(newSize, &newPtr)) {
        *ptr = 0;
        return false;
    }

    memcpy(newPtr, oldPtr, oldSize);

    if (isOversize(oldSize)) {
        // The old object owned its own block; unlink and unmap it now.
        // NOTE(review): the stale address remains in m_oversizeFilter;
        // presumably the filter tolerates false positives — confirm.
        CopiedBlock* oldBlock = oversizeBlockFor(oldPtr);
        m_oversizeBlocks.remove(oldBlock);
        oldBlock->m_allocation.deallocate();
    }

    *ptr = newPtr;
    return true;
}
145
// Called during the copying phase when a loaned block has been filled (or
// abandoned). Empty blocks are recycled; otherwise the block is published
// into to-space. Finally the loaned-block count is decremented, waking
// doneCopying() when the last loaned block comes home.
void CopiedSpace::doneFillingBlock(CopiedBlock* block)
{
    ASSERT(block);
    ASSERT(block->m_offset < reinterpret_cast<char*>(block) + HeapBlock::s_blockSize);
    ASSERT(m_inCopyingPhase);

    // Nothing was copied into this block; hand it back for reuse.
    if (block->m_offset == block->payload()) {
        recycleBlock(block);
        return;
    }

    {
        // Publish the block into to-space under the to-space lock, since
        // multiple copying threads may finish blocks concurrently.
        MutexLocker locker(m_toSpaceLock);
        m_toSpace->push(block);
        m_toSpaceSet.add(block);
        m_toSpaceFilter.add(reinterpret_cast<Bits>(block));
    }

    {
        MutexLocker locker(m_loanedBlocksLock);
        ASSERT(m_numberOfLoanedBlocks > 0);
        m_numberOfLoanedBlocks--;
        // Last outstanding loan returned: wake any waiter in doneCopying().
        if (!m_numberOfLoanedBlocks)
            m_loanedBlocksCondition.signal();
    }
}
172
// Ends the copying phase: waits for all loaned blocks to be returned, then
// reclaims unpinned from-space blocks, moves pinned ones back to to-space,
// frees unpinned oversize blocks, and re-arms the allocator.
void CopiedSpace::doneCopying()
{
    {
        // Block until every loaned block has been returned (see
        // doneFillingBlock()).
        MutexLocker locker(m_loanedBlocksLock);
        while (m_numberOfLoanedBlocks > 0)
            m_loanedBlocksCondition.wait(m_loanedBlocksLock);
    }

    ASSERT(m_inCopyingPhase);
    m_inCopyingPhase = false;
    while (!m_fromSpace->isEmpty()) {
        CopiedBlock* block = static_cast<CopiedBlock*>(m_fromSpace->removeHead());
        if (block->m_isPinned) {
            // Pinned blocks survive in place; clear the pin for next cycle.
            block->m_isPinned = false;
            // We don't add the block to the toSpaceSet because it was never removed.
            ASSERT(m_toSpaceSet.contains(block));
            m_toSpaceFilter.add(reinterpret_cast<Bits>(block));
            m_toSpace->push(block);
            continue;
        }

        // Unpinned: contents were evacuated, return the block to the allocator.
        m_toSpaceSet.remove(block);
        m_heap->blockAllocator().deallocate(block);
    }

    // Sweep oversize blocks: free the unpinned, clear pins on survivors.
    CopiedBlock* curr = static_cast<CopiedBlock*>(m_oversizeBlocks.head());
    while (curr) {
        CopiedBlock* next = static_cast<CopiedBlock*>(curr->next());
        if (!curr->m_isPinned) {
            m_oversizeBlocks.remove(curr);
            curr->m_allocation.deallocate();
        } else
            curr->m_isPinned = false;
        curr = next;
    }

    // Make sure to-space has a current block for subsequent allocation.
    if (!m_toSpace->head()) {
        if (!addNewBlock())
            CRASH();
    } else
        m_allocator.resetCurrentBlock(static_cast<CopiedBlock*>(m_toSpace->head()));
}
215
// Obtains an empty block for copying into. With AllocationMustSucceed, the
// block comes from the block allocator's cache or a fresh mapping, and
// failure is treated as unreachable. With AllocationCanFail, a collection
// may be triggered first, then the must-succeed path is reused.
CheckedBoolean CopiedSpace::getFreshBlock(AllocationEffort allocationEffort, CopiedBlock** outBlock)
{
    CopiedBlock* block = 0;
    if (allocationEffort == AllocationMustSucceed) {
        // Prefer a cached block from the block allocator; fall back to
        // mapping new memory via allocateNewBlock().
        if (HeapBlock* heapBlock = m_heap->blockAllocator().allocate())
            block = new (NotNull, heapBlock) CopiedBlock(heapBlock->m_allocation);
        else if (!allocateNewBlock(&block)) {
            *outBlock = 0;
            ASSERT_NOT_REACHED();
            return false;
        }
    } else {
        ASSERT(allocationEffort == AllocationCanFail);
        // Give the GC a chance to free memory before demanding a block.
        if (m_heap->shouldCollect())
            m_heap->collect(Heap::DoNotSweep);

        if (!getFreshBlock(AllocationMustSucceed, &block)) {
            *outBlock = 0;
            ASSERT_NOT_REACHED();
            return false;
        }
    }
    ASSERT(block);
    ASSERT(is8ByteAligned(block->m_offset));
    *outBlock = block;
    return true;
}
243
244void CopiedSpace::freeAllBlocks()
245{
246 while (!m_toSpace->isEmpty())
247 m_heap->blockAllocator().deallocate(m_toSpace->removeHead());
248
249 while (!m_fromSpace->isEmpty())
250 m_heap->blockAllocator().deallocate(m_fromSpace->removeHead());
251
252 while (!m_oversizeBlocks.isEmpty())
253 m_oversizeBlocks.removeHead()->m_allocation.deallocate();
254}
255
256size_t CopiedSpace::size()
257{
258 size_t calculatedSize = 0;
259
260 for (CopiedBlock* block = static_cast<CopiedBlock*>(m_toSpace->head()); block; block = static_cast<CopiedBlock*>(block->next()))
261 calculatedSize += block->size();
262
263 for (CopiedBlock* block = static_cast<CopiedBlock*>(m_fromSpace->head()); block; block = static_cast<CopiedBlock*>(block->next()))
264 calculatedSize += block->size();
265
266 for (CopiedBlock* block = static_cast<CopiedBlock*>(m_oversizeBlocks.head()); block; block = static_cast<CopiedBlock*>(block->next()))
267 calculatedSize += block->size();
268
269 return calculatedSize;
270}
271
272size_t CopiedSpace::capacity()
273{
274 size_t calculatedCapacity = 0;
275
276 for (CopiedBlock* block = static_cast<CopiedBlock*>(m_toSpace->head()); block; block = static_cast<CopiedBlock*>(block->next()))
277 calculatedCapacity += block->capacity();
278
279 for (CopiedBlock* block = static_cast<CopiedBlock*>(m_fromSpace->head()); block; block = static_cast<CopiedBlock*>(block->next()))
280 calculatedCapacity += block->capacity();
281
282 for (CopiedBlock* block = static_cast<CopiedBlock*>(m_oversizeBlocks.head()); block; block = static_cast<CopiedBlock*>(block->next()))
283 calculatedCapacity += block->capacity();
284
285 return calculatedCapacity;
286}
287
288static bool isBlockListPagedOut(double deadline, DoublyLinkedList<HeapBlock>* list)
289{
290 unsigned itersSinceLastTimeCheck = 0;
291 HeapBlock* current = list->head();
292 while (current) {
293 current = current->next();
294 ++itersSinceLastTimeCheck;
295 if (itersSinceLastTimeCheck >= Heap::s_timeCheckResolution) {
296 double currentTime = WTF::monotonicallyIncreasingTime();
297 if (currentTime > deadline)
298 return true;
299 itersSinceLastTimeCheck = 0;
300 }
301 }
302
303 return false;
304}
305
306bool CopiedSpace::isPagedOut(double deadline)
307{
308 return isBlockListPagedOut(deadline, m_toSpace)
309 || isBlockListPagedOut(deadline, m_fromSpace)
310 || isBlockListPagedOut(deadline, &m_oversizeBlocks);
311}
312
313} // namespace JSC