/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef CopiedSpaceInlines_h
#define CopiedSpaceInlines_h

#include "CopiedBlock.h"
#include "CopiedSpace.h"
#include "Heap.h"
#include "HeapBlock.h"
#include "VM.h"
#include <wtf/CheckedBoolean.h>

namespace JSC {
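
// Membership test for a block pointer: the per-generation Bloom filters give a
// cheap "definitely not ours" answer, and only candidates that pass one of the
// filters are looked up in the exact block set.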
inline bool CopiedSpace::contains(CopiedBlock* block)
{
    return (!m_newGen.blockFilter.ruleOut(reinterpret_cast<Bits>(block)) || !m_oldGen.blockFilter.ruleOut(reinterpret_cast<Bits>(block)))
        && m_blockSet.contains(block);
}
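
// Resolves an arbitrary pointer to its containing CopiedBlock. On success the
// block is returned through |result|; on failure |result| is cleared.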
inline bool CopiedSpace::contains(void* ptr, CopiedBlock*& result)
{
    CopiedBlock* block = blockFor(ptr);
    if (contains(block)) {
        result = block;
        return true;
    }
    result = 0;
    return false;
}

inline void CopiedSpace::pin(CopiedBlock* block)
{
    block->pin();
}
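
// Pins the block(s) that a possibly-interior or one-past-the-end pointer may
// refer to, so they are not evacuated or recycled during copying.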
inline void CopiedSpace::pinIfNecessary(void* opaquePointer)
{
    // Pointers into the copied space come in the following varieties:
    // 1)  Pointers to the start of a span of memory. This is the most
    //     natural though not necessarily the most common.
    // 2)  Pointers to one value-sized (8 byte) word past the end of
    //     a span of memory. This currently occurs with semi-butterflies
    //     and should be fixed soon, once the other half of the
    //     butterfly lands.
    // 3)  Pointers to the innards arising from loop induction variable
    //     optimizations (either manual ones or automatic, by the
    //     compiler).
    // 4)  Pointers to the end of a span of memory arising from
    //     induction variable optimizations combined with the
    //     GC-to-compiler contract laid out in the C spec: a pointer to
    //     the end of a span of memory must be considered to be a
    //     pointer to that memory.

    EncodedJSValue* pointer = reinterpret_cast<EncodedJSValue*>(opaquePointer);
    CopiedBlock* block;

    // Handle (1) and (3).
    if (contains(pointer, block))
        pin(block);

    // Handle (4). We don't have to explicitly check and pin the block under this
    // pointer because it cannot possibly point to something that cases (1) and
    // (3) above or case (2) below wouldn't already catch.
    pointer--;

    // Handle (2).
    pointer--;
    if (contains(pointer, block))
        pin(block);
}
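
// Returns an evacuated from-space block to the block allocator: it is dropped
// from the block set and from the appropriate generation's from-space list
// (under the to-space lock) before being destroyed.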
inline void CopiedSpace::recycleEvacuatedBlock(CopiedBlock* block, HeapOperation collectionType)
{
    ASSERT(block->canBeRecycled());
    ASSERT(!block->m_isPinned);
    {
        SpinLockHolder locker(&m_toSpaceLock);
        m_blockSet.remove(block);
        if (collectionType == EdenCollection)
            m_newGen.fromSpace->remove(block);
        else
            m_oldGen.fromSpace->remove(block);
    }
    m_heap->blockAllocator().deallocate(CopiedBlock::destroy(block));
}
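
// Destroys a block that was loaned out during the copying phase and updates the
// loaned-block count, waking any waiter once the last loaned block comes back.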
inline void CopiedSpace::recycleBorrowedBlock(CopiedBlock* block)
{
    m_heap->blockAllocator().deallocate(CopiedBlock::destroy(block));

    {
        MutexLocker locker(m_loanedBlocksLock);
        ASSERT(m_numberOfLoanedBlocks > 0);
        ASSERT(m_inCopyingPhase);
        m_numberOfLoanedBlocks--;
        if (!m_numberOfLoanedBlocks)
            m_loanedBlocksCondition.signal();
    }
}
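
// Hands out a fresh block (not zero-filled) for use during the copying phase and
// records it as loaned; the block is returned later via recycleBorrowedBlock().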
inline CopiedBlock* CopiedSpace::allocateBlockForCopyingPhase()
{
    ASSERT(m_inCopyingPhase);
    CopiedBlock* block = CopiedBlock::createNoZeroFill(m_heap->blockAllocator().allocate<CopiedBlock>());

    {
        MutexLocker locker(m_loanedBlocksLock);
        m_numberOfLoanedBlocks++;
    }

    ASSERT(!block->dataSize());
    return block;
}
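
// Installs a fresh to-space block as the allocator's current block, registering
// it with the new generation's bookkeeping (to-space list, block filter, block set).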
inline void CopiedSpace::allocateBlock()
{
    m_heap->collectIfNecessaryOrDefer();

    m_allocator.resetCurrentBlock();

    CopiedBlock* block = CopiedBlock::create(m_heap->blockAllocator().allocate<CopiedBlock>());

    m_newGen.toSpace->push(block);
    m_newGen.blockFilter.add(reinterpret_cast<Bits>(block));
    m_blockSet.add(block);
    m_allocator.setCurrentBlock(block);
}
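
// Fast-path allocation from the current block; falls back to the out-of-line
// slow case when the current block cannot satisfy the request.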
inline CheckedBoolean CopiedSpace::tryAllocate(size_t bytes, void** outPtr)
{
    ASSERT(!m_heap->vm()->isInitializingObject());

    if (!m_allocator.tryAllocate(bytes, outPtr))
        return tryAllocateSlowCase(bytes, outPtr);

    return true;
}

inline bool CopiedSpace::isOversize(size_t bytes)
{
    return bytes > s_maxAllocationSize;
}

inline bool CopiedSpace::isPinned(void* ptr)
{
    return blockFor(ptr)->m_isPinned;
}
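
// Maps a pointer to the block containing it by masking off the low bits; this
// relies on CopiedBlocks living on boundaries that s_blockMask describes.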
inline CopiedBlock* CopiedSpace::blockFor(void* ptr)
{
    return reinterpret_cast<CopiedBlock*>(reinterpret_cast<size_t>(ptr) & s_blockMask);
}
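
// Flips the semispaces at the start of a collection. A full collection first
// folds the new generation's blocks into the old generation and then flips the
// old generation; an Eden collection flips only the new generation.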
template <HeapOperation collectionType>
inline void CopiedSpace::startedCopying()
{
    DoublyLinkedList<CopiedBlock>* fromSpace;
    DoublyLinkedList<CopiedBlock>* oversizeBlocks;
    TinyBloomFilter* blockFilter;
    if (collectionType == FullCollection) {
        ASSERT(m_oldGen.fromSpace->isEmpty());
        ASSERT(m_newGen.fromSpace->isEmpty());

        m_oldGen.toSpace->append(*m_newGen.toSpace);
        m_oldGen.oversizeBlocks.append(m_newGen.oversizeBlocks);

        ASSERT(m_newGen.toSpace->isEmpty());
        ASSERT(m_newGen.fromSpace->isEmpty());
        ASSERT(m_newGen.oversizeBlocks.isEmpty());

        std::swap(m_oldGen.fromSpace, m_oldGen.toSpace);
        fromSpace = m_oldGen.fromSpace;
        oversizeBlocks = &m_oldGen.oversizeBlocks;
        blockFilter = &m_oldGen.blockFilter;
    } else {
        std::swap(m_newGen.fromSpace, m_newGen.toSpace);
        fromSpace = m_newGen.fromSpace;
        oversizeBlocks = &m_newGen.oversizeBlocks;
        blockFilter = &m_newGen.blockFilter;
    }

    blockFilter->reset();
    m_allocator.resetCurrentBlock();
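
    // Walk the evacuated from-space: unpinned, recyclable blocks go straight
    // back to the allocator; the rest contribute to the live/usable byte totals
    // used for the fragmentation estimate below.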
    CopiedBlock* next = 0;
    size_t totalLiveBytes = 0;
    size_t totalUsableBytes = 0;
    for (CopiedBlock* block = fromSpace->head(); block; block = next) {
        next = block->next();
        if (!block->isPinned() && block->canBeRecycled()) {
            recycleEvacuatedBlock(block, collectionType);
            continue;
        }

        ASSERT(block->liveBytes() <= CopiedBlock::blockSize);
        totalLiveBytes += block->liveBytes();
        totalUsableBytes += block->payloadCapacity();
    }
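
    // Pinned oversize blocks are kept and re-added to the block filter; unpinned
    // ones are removed from the space and destroyed outright.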
    CopiedBlock* block = oversizeBlocks->head();
    while (block) {
        CopiedBlock* next = block->next();
        if (block->isPinned()) {
            blockFilter->add(reinterpret_cast<Bits>(block));
            totalLiveBytes += block->payloadCapacity();
            totalUsableBytes += block->payloadCapacity();
        } else {
            oversizeBlocks->remove(block);
            m_blockSet.remove(block);
            m_heap->blockAllocator().deallocateCustomSize(CopiedBlock::destroy(block));
        }
        block = next;
    }
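
    // Decide whether the copy phase is worth running: estimate utilization over
    // the copied and marked spaces combined, and copy only for Eden collections
    // or when utilization drops to Options::minHeapUtilization() or below.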
    double markedSpaceBytes = m_heap->objectSpace().capacity();
    double totalFragmentation = static_cast<double>(totalLiveBytes + markedSpaceBytes) / static_cast<double>(totalUsableBytes + markedSpaceBytes);
    m_shouldDoCopyPhase = m_heap->operationInProgress() == EdenCollection || totalFragmentation <= Options::minHeapUtilization();
    if (!m_shouldDoCopyPhase) {
        if (Options::logGC())
            dataLog("Skipped copying, ");
        return;
    }

    if (Options::logGC())
        dataLogF("Did copy, ");
    ASSERT(m_shouldDoCopyPhase);
    ASSERT(!m_numberOfLoanedBlocks);
    ASSERT(!m_inCopyingPhase);
    m_inCopyingPhase = true;
}

} // namespace JSC

#endif // CopiedSpaceInlines_h