/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef CopiedSpaceInlines_h
#define CopiedSpaceInlines_h

#include "CopiedBlock.h"
#include "CopiedSpace.h"
#include "Heap.h"
#include "HeapBlock.h"
#include "VM.h"
#include <wtf/CheckedBoolean.h>

namespace JSC {

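// Membership test for a candidate block: the per-generation TinyBloomFilters
// give a fast, conservative rejection for pointers that cannot be in the
// copied space; only candidates that pass one of the filters are checked
// against the exact block set.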
inline bool CopiedSpace::contains(CopiedBlock* block)
{
    return (!m_newGen.blockFilter.ruleOut(reinterpret_cast<Bits>(block)) || !m_oldGen.blockFilter.ruleOut(reinterpret_cast<Bits>(block)))
        && m_blockSet.contains(block);
}

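// Maps an arbitrary pointer to the block that would contain it and reports
// whether that block actually belongs to the copied space, returning the
// block through |result| on success.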
inline bool CopiedSpace::contains(void* ptr, CopiedBlock*& result)
{
    CopiedBlock* block = blockFor(ptr);
    if (contains(block)) {
        result = block;
        return true;
    }
    result = 0;
    return false;
}

inline void CopiedSpace::pin(CopiedBlock* block)
{
    block->pin();
}

inline void CopiedSpace::pinIfNecessary(void* opaquePointer)
{
    // Pointers into the copied space come in the following varieties:
    // 1) Pointers to the start of a span of memory. This is the most
    //    natural though not necessarily the most common.
    // 2) Pointers to one value-sized (8 byte) word past the end of
    //    a span of memory. This currently occurs with semi-butterflies
    //    and should be fixed soon, once the other half of the
    //    butterfly lands.
    // 3) Pointers to the innards arising from loop induction variable
    //    optimizations (either manual ones or automatic, by the
    //    compiler).
    // 4) Pointers to the end of a span of memory arising from
    //    induction variable optimizations combined with the
    //    GC-to-compiler contract laid out in the C spec: a pointer to
    //    the end of a span of memory must be considered to be a
    //    pointer to that memory.

    EncodedJSValue* pointer = reinterpret_cast<EncodedJSValue*>(opaquePointer);
    CopiedBlock* block;

    // Handle (1) and (3).
    if (contains(pointer, block))
        pin(block);

    // Handle (4). We don't have to explicitly check and pin the block under this
    // pointer, because anything it could point to would already be caught by
    // cases (1) and (3) above or by case (2) below.
    pointer--;

    // Handle (2)
    pointer--;
    if (contains(pointer, block))
        pin(block);
}

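// Returns an unpinned from-space block whose live data has already been
// evacuated: it is dropped from the block set and from the appropriate
// generation's from-space list, then handed back to the block allocator.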
inline void CopiedSpace::recycleEvacuatedBlock(CopiedBlock* block, HeapOperation collectionType)
{
    ASSERT(block);
    ASSERT(block->canBeRecycled());
    ASSERT(!block->m_isPinned);
    {
        SpinLockHolder locker(&m_toSpaceLock);
        m_blockSet.remove(block);
        if (collectionType == EdenCollection)
            m_newGen.fromSpace->remove(block);
        else
            m_oldGen.fromSpace->remove(block);
    }
    m_heap->blockAllocator().deallocate(CopiedBlock::destroy(block));
}

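// Returns a block that was loaned out during the copying phase. The loan
// count is decremented under m_loanedBlocksLock, and the condition variable
// is signaled once the last outstanding block comes back.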
inline void CopiedSpace::recycleBorrowedBlock(CopiedBlock* block)
{
    m_heap->blockAllocator().deallocate(CopiedBlock::destroy(block));

    {
        MutexLocker locker(m_loanedBlocksLock);
        ASSERT(m_numberOfLoanedBlocks > 0);
        ASSERT(m_inCopyingPhase);
        m_numberOfLoanedBlocks--;
        if (!m_numberOfLoanedBlocks)
            m_loanedBlocksCondition.signal();
    }
}

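// Hands a fresh block to a copying thread. The block is created without
// zero-filling (its payload is about to be filled with copied data) and is
// counted as loaned until recycleBorrowedBlock() returns it.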
inline CopiedBlock* CopiedSpace::allocateBlockForCopyingPhase()
{
    ASSERT(m_inCopyingPhase);
    CopiedBlock* block = CopiedBlock::createNoZeroFill(m_heap->blockAllocator().allocate<CopiedBlock>());

    {
        MutexLocker locker(m_loanedBlocksLock);
        m_numberOfLoanedBlocks++;
    }

    ASSERT(!block->dataSize());
    return block;
}

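// Installs a new block as the bump allocator's current target: the block is
// pushed onto the new generation's to-space, registered with the Bloom
// filter and the block set, and then handed to m_allocator.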
inline void CopiedSpace::allocateBlock()
{
    m_heap->collectIfNecessaryOrDefer();

    m_allocator.resetCurrentBlock();

    CopiedBlock* block = CopiedBlock::create(m_heap->blockAllocator().allocate<CopiedBlock>());

    m_newGen.toSpace->push(block);
    m_newGen.blockFilter.add(reinterpret_cast<Bits>(block));
    m_blockSet.add(block);
    m_allocator.setCurrentBlock(block);
}

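// Fast allocation path: bump-allocate out of the current block, falling back
// to tryAllocateSlowCase() when the request cannot be satisfied there.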
inline CheckedBoolean CopiedSpace::tryAllocate(size_t bytes, void** outPtr)
{
    ASSERT(!m_heap->vm()->isInitializingObject());
    ASSERT(bytes);

    if (!m_allocator.tryAllocate(bytes, outPtr))
        return tryAllocateSlowCase(bytes, outPtr);

    ASSERT(*outPtr);
    return true;
}

inline bool CopiedSpace::isOversize(size_t bytes)
{
    return bytes > s_maxAllocationSize;
}

inline bool CopiedSpace::isPinned(void* ptr)
{
    return blockFor(ptr)->m_isPinned;
}

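// CopiedBlocks live on block-size-aligned boundaries, so masking off the low
// bits of any interior pointer yields the address of its block header.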
inline CopiedBlock* CopiedSpace::blockFor(void* ptr)
{
    return reinterpret_cast<CopiedBlock*>(reinterpret_cast<size_t>(ptr) & s_blockMask);
}

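// Prepares the copied space for the copying phase of a collection. For a
// full collection the new generation's blocks are first promoted into the
// old generation; the relevant generation's semispaces are then swapped so
// the emptied to-space can receive survivors. Unpinned blocks whose live
// data can be evacuated are recycled immediately, unpinned oversize blocks
// are freed, and the copy phase itself only runs when it looks worthwhile
// (see below).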
template <HeapOperation collectionType>
inline void CopiedSpace::startedCopying()
{
    DoublyLinkedList<CopiedBlock>* fromSpace;
    DoublyLinkedList<CopiedBlock>* oversizeBlocks;
    TinyBloomFilter* blockFilter;
    if (collectionType == FullCollection) {
        ASSERT(m_oldGen.fromSpace->isEmpty());
        ASSERT(m_newGen.fromSpace->isEmpty());

        m_oldGen.toSpace->append(*m_newGen.toSpace);
        m_oldGen.oversizeBlocks.append(m_newGen.oversizeBlocks);

        ASSERT(m_newGen.toSpace->isEmpty());
        ASSERT(m_newGen.fromSpace->isEmpty());
        ASSERT(m_newGen.oversizeBlocks.isEmpty());

        std::swap(m_oldGen.fromSpace, m_oldGen.toSpace);
        fromSpace = m_oldGen.fromSpace;
        oversizeBlocks = &m_oldGen.oversizeBlocks;
        blockFilter = &m_oldGen.blockFilter;
    } else {
        std::swap(m_newGen.fromSpace, m_newGen.toSpace);
        fromSpace = m_newGen.fromSpace;
        oversizeBlocks = &m_newGen.oversizeBlocks;
        blockFilter = &m_newGen.blockFilter;
    }

    blockFilter->reset();
    m_allocator.resetCurrentBlock();

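    // Walk the from-space. Blocks that are unpinned and can be recycled are
    // returned to the allocator right away; every other block survives in
    // place, so tally its live and usable bytes and mark it as promoted.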
    CopiedBlock* next = 0;
    size_t totalLiveBytes = 0;
    size_t totalUsableBytes = 0;
    for (CopiedBlock* block = fromSpace->head(); block; block = next) {
        next = block->next();
        if (!block->isPinned() && block->canBeRecycled()) {
            recycleEvacuatedBlock(block, collectionType);
            continue;
        }
        ASSERT(block->liveBytes() <= CopiedBlock::blockSize);
        totalLiveBytes += block->liveBytes();
        totalUsableBytes += block->payloadCapacity();
        block->didPromote();
    }

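    // Oversize blocks are never copied: pinned ones are kept, re-added to the
    // filter, and promoted; unpinned ones are removed and their storage is
    // returned to the block allocator.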
    CopiedBlock* block = oversizeBlocks->head();
    while (block) {
        CopiedBlock* next = block->next();
        if (block->isPinned()) {
            blockFilter->add(reinterpret_cast<Bits>(block));
            totalLiveBytes += block->payloadCapacity();
            totalUsableBytes += block->payloadCapacity();
            block->didPromote();
        } else {
            oversizeBlocks->remove(block);
            m_blockSet.remove(block);
            m_heap->blockAllocator().deallocateCustomSize(CopiedBlock::destroy(block));
        }
        block = next;
    }

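    // Decide whether the copying phase is worth running. Eden collections
    // always copy; a full collection copies only when the estimated heap
    // utilization (live bytes over usable bytes, with the marked space's
    // capacity counted on both sides) is at or below Options::minHeapUtilization().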
    double markedSpaceBytes = m_heap->objectSpace().capacity();
    double totalFragmentation = static_cast<double>(totalLiveBytes + markedSpaceBytes) / static_cast<double>(totalUsableBytes + markedSpaceBytes);
    m_shouldDoCopyPhase = m_heap->operationInProgress() == EdenCollection || totalFragmentation <= Options::minHeapUtilization();
    if (!m_shouldDoCopyPhase) {
        if (Options::logGC())
            dataLog("Skipped copying, ");
        return;
    }

    if (Options::logGC())
        dataLogF("Did copy, ");
    ASSERT(m_shouldDoCopyPhase);
    ASSERT(!m_numberOfLoanedBlocks);
    ASSERT(!m_inCopyingPhase);
    m_inCopyingPhase = true;
}

} // namespace JSC

#endif // CopiedSpaceInlines_h