/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "CopiedSpace.h"

#include "CopiedSpaceInlineMethods.h"
#include "GCActivityCallback.h"

namespace JSC {

CopiedSpace::CopiedSpace(Heap* heap)
    : m_heap(heap)
    , m_toSpace(0)
    , m_fromSpace(0)
    , m_inCopyingPhase(false)
    , m_numberOfLoanedBlocks(0)
{
}

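// Designates m_blocks1 as the initial to-space and m_blocks2 as the initial from-space, then primes to-space with a first block.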
void CopiedSpace::init()
{
    m_toSpace = &m_blocks1;
    m_fromSpace = &m_blocks2;

    if (!addNewBlock())
        CRASH();
}

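// Slow path for allocation: taken when the current block cannot satisfy the request. Oversize requests are diverted to their own path; otherwise a fresh block is started and the allocation retried from it.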
CheckedBoolean CopiedSpace::tryAllocateSlowCase(size_t bytes, void** outPtr)
{
    if (isOversize(bytes))
        return tryAllocateOversize(bytes, outPtr);

    ASSERT(m_heap->globalData()->apiLock().currentThreadIsHoldingLock());
    m_heap->didAllocate(m_allocator.currentCapacity());

    if (!addNewBlock()) {
        *outPtr = 0;
        return false;
    }
    *outPtr = m_allocator.allocate(bytes);
    ASSERT(*outPtr);
    return true;
}

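// Oversize objects get a dedicated page-aligned allocation (CopiedBlock header plus payload, rounded up to whole pages) tracked in m_oversizeBlocks and m_oversizeFilter rather than in to-space.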
CheckedBoolean CopiedSpace::tryAllocateOversize(size_t bytes, void** outPtr)
{
    ASSERT(isOversize(bytes));

    size_t blockSize = WTF::roundUpToMultipleOf(WTF::pageSize(), sizeof(CopiedBlock) + bytes);

    PageAllocationAligned allocation = PageAllocationAligned::allocate(blockSize, WTF::pageSize(), OSAllocator::JSGCHeapPages);
    if (!static_cast<bool>(allocation)) {
        *outPtr = 0;
        return false;
    }

    CopiedBlock* block = new (NotNull, allocation.base()) CopiedBlock(allocation);
    m_oversizeBlocks.push(block);
    m_oversizeFilter.add(reinterpret_cast<Bits>(block));

    *outPtr = allocateFromBlock(block, bytes);

    m_heap->didAllocate(blockSize);

    return true;
}

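// Grows an existing allocation. Shrinking is a no-op; growth happens in place when the allocation was the most recent one in the current block and the extra bytes fit, otherwise by allocating anew and copying.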
CheckedBoolean CopiedSpace::tryReallocate(void** ptr, size_t oldSize, size_t newSize)
{
    if (oldSize >= newSize)
        return true;

    void* oldPtr = *ptr;
    ASSERT(!m_heap->globalData()->isInitializingObject());

    if (isOversize(oldSize) || isOversize(newSize))
        return tryReallocateOversize(ptr, oldSize, newSize);

    if (m_allocator.wasLastAllocation(oldPtr, oldSize)) {
        size_t delta = newSize - oldSize;
        if (m_allocator.fitsInCurrentBlock(delta)) {
            (void)m_allocator.allocate(delta);
            return true;
        }
    }

    void* result = 0;
    if (!tryAllocate(newSize, &result)) {
        *ptr = 0;
        return false;
    }
    memcpy(result, oldPtr, oldSize);
    *ptr = result;
    return true;
}

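// Reallocation path used whenever the old or new size is oversize: allocate a new oversize block, copy the payload across, and release the old oversize block if there was one.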
CheckedBoolean CopiedSpace::tryReallocateOversize(void** ptr, size_t oldSize, size_t newSize)
{
    ASSERT(isOversize(oldSize) || isOversize(newSize));
    ASSERT(newSize > oldSize);

    void* oldPtr = *ptr;

    void* newPtr = 0;
    if (!tryAllocateOversize(newSize, &newPtr)) {
        *ptr = 0;
        return false;
    }

    memcpy(newPtr, oldPtr, oldSize);

    if (isOversize(oldSize)) {
        CopiedBlock* oldBlock = oversizeBlockFor(oldPtr);
        m_oversizeBlocks.remove(oldBlock);
        oldBlock->m_allocation.deallocate();
    }

    *ptr = newPtr;
    return true;
}

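// Called when a borrowed block has been filled during copying. Empty blocks are recycled; otherwise the block is linked into to-space and the loaned-block count is dropped, signalling doneCopying() when it reaches zero.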
void CopiedSpace::doneFillingBlock(CopiedBlock* block)
{
    ASSERT(block);
    ASSERT(block->m_offset < reinterpret_cast<char*>(block) + HeapBlock::s_blockSize);
    ASSERT(m_inCopyingPhase);

    if (block->m_offset == block->payload()) {
        recycleBlock(block);
        return;
    }

    {
        MutexLocker locker(m_toSpaceLock);
        m_toSpace->push(block);
        m_toSpaceSet.add(block);
        m_toSpaceFilter.add(reinterpret_cast<Bits>(block));
    }

    {
        MutexLocker locker(m_loanedBlocksLock);
        ASSERT(m_numberOfLoanedBlocks > 0);
        m_numberOfLoanedBlocks--;
        if (!m_numberOfLoanedBlocks)
            m_loanedBlocksCondition.signal();
    }
}

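// Called at the end of the copying phase: waits for all loaned blocks to be returned, gives unpinned from-space blocks back to the block allocator (pinned ones survive in to-space), sweeps unpinned oversize blocks, and resets the allocator to the head of to-space.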
void CopiedSpace::doneCopying()
{
    {
        MutexLocker locker(m_loanedBlocksLock);
        while (m_numberOfLoanedBlocks > 0)
            m_loanedBlocksCondition.wait(m_loanedBlocksLock);
    }

    ASSERT(m_inCopyingPhase);
    m_inCopyingPhase = false;
    while (!m_fromSpace->isEmpty()) {
        CopiedBlock* block = static_cast<CopiedBlock*>(m_fromSpace->removeHead());
        if (block->m_isPinned) {
            block->m_isPinned = false;
            // We don't add the block to the toSpaceSet because it was never removed.
            ASSERT(m_toSpaceSet.contains(block));
            m_toSpaceFilter.add(reinterpret_cast<Bits>(block));
            m_toSpace->push(block);
            continue;
        }

        m_toSpaceSet.remove(block);
        m_heap->blockAllocator().deallocate(block);
    }

    CopiedBlock* curr = static_cast<CopiedBlock*>(m_oversizeBlocks.head());
    while (curr) {
        CopiedBlock* next = static_cast<CopiedBlock*>(curr->next());
        if (!curr->m_isPinned) {
            m_oversizeBlocks.remove(curr);
            curr->m_allocation.deallocate();
        } else
            curr->m_isPinned = false;
        curr = next;
    }

    if (!m_toSpace->head()) {
        if (!addNewBlock())
            CRASH();
    } else
        m_allocator.resetCurrentBlock(static_cast<CopiedBlock*>(m_toSpace->head()));
}

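// Obtains an empty block for copying. With AllocationMustSucceed the block comes from the block allocator or a fresh page allocation; with AllocationCanFail a collection may be triggered first before falling back to the must-succeed path.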
CheckedBoolean CopiedSpace::getFreshBlock(AllocationEffort allocationEffort, CopiedBlock** outBlock)
{
    CopiedBlock* block = 0;
    if (allocationEffort == AllocationMustSucceed) {
        if (HeapBlock* heapBlock = m_heap->blockAllocator().allocate())
            block = new (NotNull, heapBlock) CopiedBlock(heapBlock->m_allocation);
        else if (!allocateNewBlock(&block)) {
            *outBlock = 0;
            ASSERT_NOT_REACHED();
            return false;
        }
    } else {
        ASSERT(allocationEffort == AllocationCanFail);
        if (m_heap->shouldCollect())
            m_heap->collect(Heap::DoNotSweep);

        if (!getFreshBlock(AllocationMustSucceed, &block)) {
            *outBlock = 0;
            ASSERT_NOT_REACHED();
            return false;
        }
    }
    ASSERT(block);
    ASSERT(is8ByteAligned(block->m_offset));
    *outBlock = block;
    return true;
}

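// Tears down the space: returns every to-space and from-space block to the block allocator and frees each oversize block's page allocation.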
void CopiedSpace::freeAllBlocks()
{
    while (!m_toSpace->isEmpty())
        m_heap->blockAllocator().deallocate(m_toSpace->removeHead());

    while (!m_fromSpace->isEmpty())
        m_heap->blockAllocator().deallocate(m_fromSpace->removeHead());

    while (!m_oversizeBlocks.isEmpty())
        m_oversizeBlocks.removeHead()->m_allocation.deallocate();
}

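// Total bytes currently used across to-space, from-space, and oversize blocks.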
size_t CopiedSpace::size()
{
    size_t calculatedSize = 0;

    for (CopiedBlock* block = static_cast<CopiedBlock*>(m_toSpace->head()); block; block = static_cast<CopiedBlock*>(block->next()))
        calculatedSize += block->size();

    for (CopiedBlock* block = static_cast<CopiedBlock*>(m_fromSpace->head()); block; block = static_cast<CopiedBlock*>(block->next()))
        calculatedSize += block->size();

    for (CopiedBlock* block = static_cast<CopiedBlock*>(m_oversizeBlocks.head()); block; block = static_cast<CopiedBlock*>(block->next()))
        calculatedSize += block->size();

    return calculatedSize;
}

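// Total bytes reserved across to-space, from-space, and oversize blocks.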
size_t CopiedSpace::capacity()
{
    size_t calculatedCapacity = 0;

    for (CopiedBlock* block = static_cast<CopiedBlock*>(m_toSpace->head()); block; block = static_cast<CopiedBlock*>(block->next()))
        calculatedCapacity += block->capacity();

    for (CopiedBlock* block = static_cast<CopiedBlock*>(m_fromSpace->head()); block; block = static_cast<CopiedBlock*>(block->next()))
        calculatedCapacity += block->capacity();

    for (CopiedBlock* block = static_cast<CopiedBlock*>(m_oversizeBlocks.head()); block; block = static_cast<CopiedBlock*>(block->next()))
        calculatedCapacity += block->capacity();

    return calculatedCapacity;
}

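// Walks a block list, checking the clock every Heap::s_timeCheckResolution iterations; returns true if the walk is still going when the deadline passes, which is taken as a sign the list is paged out.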
static bool isBlockListPagedOut(double deadline, DoublyLinkedList<HeapBlock>* list)
{
    unsigned itersSinceLastTimeCheck = 0;
    HeapBlock* current = list->head();
    while (current) {
        current = current->next();
        ++itersSinceLastTimeCheck;
        if (itersSinceLastTimeCheck >= Heap::s_timeCheckResolution) {
            double currentTime = WTF::monotonicallyIncreasingTime();
            if (currentTime > deadline)
                return true;
            itersSinceLastTimeCheck = 0;
        }
    }

    return false;
}

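// A CopiedSpace is considered paged out if walking any of its block lists overruns the deadline.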
bool CopiedSpace::isPagedOut(double deadline)
{
    return isBlockListPagedOut(deadline, m_toSpace)
        || isBlockListPagedOut(deadline, m_fromSpace)
        || isBlockListPagedOut(deadline, &m_oversizeBlocks);
}

} // namespace JSC