/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "MarkedBlock.h"

#include "IncrementalSweeper.h"
#include "JSCell.h"
#include "JSDestructibleObject.h"
#include "JSCInlines.h"

namespace JSC {

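// Placement-constructs a MarkedBlock in memory aligned to blockSize; that alignment
// is what allows a cell pointer to be mapped back to its owning block (see
// MarkedBlock::blockFor).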
MarkedBlock* MarkedBlock::create(MarkedAllocator* allocator, size_t capacity, size_t cellSize, bool needsDestruction)
{
    return new (NotNull, fastAlignedMalloc(blockSize, capacity)) MarkedBlock(allocator, capacity, cellSize, needsDestruction);
}

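// Runs the block's destructor explicitly and returns the aligned allocation to the
// fastMalloc heap; the counterpart to create() above.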
void MarkedBlock::destroy(MarkedBlock* block)
{
    block->~MarkedBlock();
    fastAlignedFree(block);
}

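// m_endAtom is an exclusive bound on the atom index at which a cell may begin.
// Blocks owned by a sized allocator derive it from atomsPerBlock; oversize blocks
// (allocator cell size of 0) derive it from the block's actual capacity.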
MarkedBlock::MarkedBlock(MarkedAllocator* allocator, size_t capacity, size_t cellSize, bool needsDestruction)
    : DoublyLinkedListNode<MarkedBlock>()
    , m_atomsPerCell((cellSize + atomSize - 1) / atomSize)
    , m_endAtom((allocator->cellSize() ? atomsPerBlock : capacity / atomSize) - m_atomsPerCell + 1)
    , m_capacity(capacity)
    , m_needsDestruction(needsDestruction)
    , m_allocator(allocator)
    , m_state(New) // All cells start out unmarked.
    , m_weakSet(allocator->heap()->vm(), *this)
{
    ASSERT(allocator);
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
}

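// Runs the cell's destructor through the appropriate method table (immortal-structure
// cells go through their Structure; everything else is a JSDestructibleObject) and
// zaps the cell so it cannot be destroyed twice.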
inline void MarkedBlock::callDestructor(JSCell* cell)
{
    // A previous eager sweep may already have run cell's destructor.
    if (cell->isZapped())
        return;

    ASSERT(cell->structureID());
    if (cell->inlineTypeFlags() & StructureIsImmortal)
        cell->structure(*vm())->classInfo()->methodTable.destroy(cell);
    else
        jsCast<JSDestructibleObject*>(cell)->classInfo()->methodTable.destroy(cell);
    cell->zap();
}

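// The sweep loop is specialized at compile time on the block's prior state, on whether
// a free list should be built, and on whether destructors must run, so each
// instantiation contains only the work it actually needs.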
template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode, bool callDestructors>
MarkedBlock::FreeList MarkedBlock::specializedSweep()
{
    ASSERT(blockState != Allocated && blockState != FreeListed);
    ASSERT(!(!callDestructors && sweepMode == SweepOnly));

    SamplingRegion samplingRegion((callDestructors && blockState != New) ? "Calling destructors" : "sweeping");

    // This produces a free list that is ordered in reverse through the block.
    // This is fine, since the allocation code makes no assumptions about the
    // order of the free list.
    FreeCell* head = 0;
    size_t count = 0;
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
        if (blockState == Marked && (m_marks.get(i) || (m_newlyAllocated && m_newlyAllocated->get(i))))
            continue;

        JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);

        if (callDestructors && blockState != New)
            callDestructor(cell);

        if (sweepMode == SweepToFreeList) {
            FreeCell* freeCell = reinterpret_cast<FreeCell*>(cell);
            freeCell->next = head;
            head = freeCell;
            ++count;
        }
    }

    // We only want to discard the newlyAllocated bits if we're creating a FreeList,
    // otherwise we would lose information on what's currently alive.
    if (sweepMode == SweepToFreeList && m_newlyAllocated)
        m_newlyAllocated = nullptr;

    m_state = ((sweepMode == SweepToFreeList) ? FreeListed : Marked);
    return FreeList(head, count * cellSize());
}

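// Public entry point for sweeping: sweeps the weak set first, then dispatches to the
// templatized helper based on whether cells in this block need destruction. A
// SweepOnly pass over a block without destructible cells has nothing to do.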
MarkedBlock::FreeList MarkedBlock::sweep(SweepMode sweepMode)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    m_weakSet.sweep();

    if (sweepMode == SweepOnly && !m_needsDestruction)
        return FreeList();

    if (m_needsDestruction)
        return sweepHelper<true>(sweepMode);
    return sweepHelper<false>(sweepMode);
}

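// Chooses the specializedSweep instantiation that matches the block's current state.
// Allocated and Retired blocks must never be swept directly.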
template<bool callDestructors>
MarkedBlock::FreeList MarkedBlock::sweepHelper(SweepMode sweepMode)
{
    switch (m_state) {
    case New:
        ASSERT(sweepMode == SweepToFreeList);
        return specializedSweep<New, SweepToFreeList, callDestructors>();
    case FreeListed:
        // Happens when a block transitions to fully allocated.
        ASSERT(sweepMode == SweepToFreeList);
        return FreeList();
    case Retired:
    case Allocated:
        RELEASE_ASSERT_NOT_REACHED();
        return FreeList();
    case Marked:
        return sweepMode == SweepToFreeList
            ? specializedSweep<Marked, SweepToFreeList, callDestructors>()
            : specializedSweep<Marked, SweepOnly, callDestructors>();
    }

    RELEASE_ASSERT_NOT_REACHED();
    return FreeList();
}

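// Helper functor for stopAllocating(): sets the newly-allocated bit for a cell. It is
// applied to every cell in the block; stopAllocating() then clears the bit again for
// the cells that are still on the free list.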
class SetNewlyAllocatedFunctor : public MarkedBlock::VoidFunctor {
public:
    SetNewlyAllocatedFunctor(MarkedBlock* block)
        : m_block(block)
    {
    }

    IterationStatus operator()(JSCell* cell)
    {
        ASSERT(MarkedBlock::blockFor(cell) == m_block);
        m_block->setNewlyAllocated(cell);
        return IterationStatus::Continue;
    }

private:
    MarkedBlock* m_block;
};

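// Called when the mutator gives up this block's free list mid-cycle. Cells still on
// the free list remain dead; every other cell is recorded in the newly-allocated
// bitmap so the heap can tell it is live even though it has no mark bit yet.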
void MarkedBlock::stopAllocating(const FreeList& freeList)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
    FreeCell* head = freeList.head;

    if (m_state == Marked) {
        // If the block is in the Marked state then we know that:
        // 1) It was not used for allocation during the previous allocation cycle.
        // 2) It may have dead objects, and we only know them to be dead by the
        //    fact that their mark bits are unset.
        // Hence if the block is Marked we need to leave it Marked.

        ASSERT(!head);
        return;
    }

    ASSERT(m_state == FreeListed);

    // Roll back to a coherent state for Heap introspection. Cells newly
    // allocated from our free list are not currently marked, so we need another
    // way to tell what's live vs dead.

    ASSERT(!m_newlyAllocated);
    m_newlyAllocated = std::make_unique<WTF::Bitmap<atomsPerBlock>>();

    SetNewlyAllocatedFunctor functor(this);
    forEachCell(functor);

    FreeCell* next;
    for (FreeCell* current = head; current; current = next) {
        next = current->next;
        reinterpret_cast<JSCell*>(current)->zap();
        clearNewlyAllocated(current);
    }

    m_state = Marked;
}

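// With generational GC enabled (ENABLE(GGC)), an Eden collection must not discard
// mark bits from older collections, so the clearing strategy is chosen from the
// collection currently in progress.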
void MarkedBlock::clearMarks()
{
#if ENABLE(GGC)
    if (heap()->operationInProgress() == JSC::EdenCollection)
        this->clearMarksWithCollectionType<EdenCollection>();
    else
        this->clearMarksWithCollectionType<FullCollection>();
#else
    this->clearMarksWithCollectionType<FullCollection>();
#endif
}

template <HeapOperation collectionType>
void MarkedBlock::clearMarksWithCollectionType()
{
    ASSERT(collectionType == FullCollection || collectionType == EdenCollection);
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    ASSERT(m_state != New && m_state != FreeListed);
    if (collectionType == FullCollection) {
        m_marks.clearAll();
        // This will become true at the end of the mark phase. We set it now to
        // avoid an extra pass to do so later.
        m_state = Marked;
        return;
    }

    ASSERT(collectionType == EdenCollection);
    // If a block was retired then there's no way an EdenCollection can un-retire it.
    if (m_state != Retired)
        m_state = Marked;
}

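// Run when the heap is going away: finalizes remaining weak references, then clears
// marks and sweeps so every surviving cell's destructor still gets a chance to run.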
void MarkedBlock::lastChanceToFinalize()
{
    m_weakSet.lastChanceToFinalize();

    clearNewlyAllocated();
    clearMarksWithCollectionType<FullCollection>();
    sweep();
}

MarkedBlock::FreeList MarkedBlock::resumeAllocating()
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    ASSERT(m_state == Marked);

    if (!m_newlyAllocated) {
        // We didn't have to create a "newly allocated" bitmap. That means we were already Marked
        // when we last stopped allocation, so return an empty free list and stay in the Marked state.
        return FreeList();
    }

    // Re-create our free list from before stopping allocation.
    return sweep(SweepToFreeList);
}

void MarkedBlock::didRetireBlock(const FreeList& freeList)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
    FreeCell* head = freeList.head;

    // Currently we don't notify the Heap that we're giving up on this block.
    // The Heap might be able to make a better decision about how many bytes should
    // be allocated before the next collection if it knew about this retired block.
    // On the other hand we'll waste at most 10% of our Heap space between FullCollections
    // and only under heavy fragmentation.

    // We need to zap the free list when retiring a block so that we don't try to destroy
    // previously destroyed objects when we re-sweep the block in the future.
    FreeCell* next;
    for (FreeCell* current = head; current; current = next) {
        next = current->next;
        reinterpret_cast<JSCell*>(current)->zap();
    }

    ASSERT(m_state == FreeListed);
    m_state = Retired;
}

} // namespace JSC