/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "MarkedBlock.h"

#include "IncrementalSweeper.h"
#include "JSCell.h"
#include "JSDestructibleObject.h"
#include "JSCInlines.h"

namespace JSC {
MarkedBlock* MarkedBlock::create(MarkedAllocator* allocator, size_t capacity, size_t cellSize, bool needsDestruction)
{
    return new (NotNull, fastAlignedMalloc(blockSize, capacity)) MarkedBlock(allocator, capacity, cellSize, needsDestruction);
}

void MarkedBlock::destroy(MarkedBlock* block)
{
    block->~MarkedBlock();
    fastAlignedFree(block);
}
MarkedBlock::MarkedBlock(MarkedAllocator* allocator, size_t capacity, size_t cellSize, bool needsDestruction)
    : DoublyLinkedListNode<MarkedBlock>()
    , m_atomsPerCell((cellSize + atomSize - 1) / atomSize)
    , m_endAtom((allocator->cellSize() ? atomsPerBlock - m_atomsPerCell : firstAtom()) + 1)
    , m_capacity(capacity)
    , m_needsDestruction(needsDestruction)
    , m_allocator(allocator)
    , m_state(New) // All cells start out unmarked.
    , m_weakSet(allocator->heap()->vm(), *this)
{
    ASSERT(allocator);
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
}
inline void MarkedBlock::callDestructor(JSCell* cell)
{
    // A previous eager sweep may already have run cell's destructor.
    if (cell->isZapped())
        return;

    ASSERT(cell->structureID());
    if (cell->inlineTypeFlags() & StructureIsImmortal)
        cell->structure(*vm())->classInfo()->methodTable.destroy(cell);
    else
        jsCast<JSDestructibleObject*>(cell)->classInfo()->methodTable.destroy(cell);
    cell->zap();
}
template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode, bool callDestructors>
MarkedBlock::FreeList MarkedBlock::specializedSweep()
{
    ASSERT(blockState != Allocated && blockState != FreeListed);
    ASSERT(!(!callDestructors && sweepMode == SweepOnly));

    SamplingRegion samplingRegion((callDestructors && blockState != New) ? "Calling destructors" : "sweeping");

    // This produces a free list that is ordered in reverse through the block.
    // This is fine, since the allocation code makes no assumptions about the
    // order of the free list.
    FreeCell* head = nullptr;
    size_t count = 0;
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
        if (blockState == Marked && (m_marks.get(i) || (m_newlyAllocated && m_newlyAllocated->get(i))))
            continue;

        JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);

        if (callDestructors && blockState != New)
            callDestructor(cell);

        if (sweepMode == SweepToFreeList) {
            FreeCell* freeCell = reinterpret_cast<FreeCell*>(cell);
            freeCell->next = head;
            head = freeCell;
            ++count;
        }
    }

    // We only want to discard the newlyAllocated bits if we're creating a FreeList,
    // otherwise we would lose information on what's currently alive.
    if (sweepMode == SweepToFreeList && m_newlyAllocated)
        m_newlyAllocated = nullptr;

    m_state = ((sweepMode == SweepToFreeList) ? FreeListed : Marked);
    return FreeList(head, count * cellSize());
}
MarkedBlock::FreeList MarkedBlock::sweep(SweepMode sweepMode)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    m_weakSet.sweep();

    if (sweepMode == SweepOnly && !m_needsDestruction)
        return FreeList();

    if (m_needsDestruction)
        return sweepHelper<true>(sweepMode);
    return sweepHelper<false>(sweepMode);
}
template<bool callDestructors>
MarkedBlock::FreeList MarkedBlock::sweepHelper(SweepMode sweepMode)
{
    switch (m_state) {
    case New:
        ASSERT(sweepMode == SweepToFreeList);
        return specializedSweep<New, SweepToFreeList, callDestructors>();
    case FreeListed:
        // Happens when a block transitions to fully allocated.
        ASSERT(sweepMode == SweepToFreeList);
        return FreeList();
    case Retired:
    case Allocated:
        RELEASE_ASSERT_NOT_REACHED();
        return FreeList();
    case Marked:
        return sweepMode == SweepToFreeList
            ? specializedSweep<Marked, SweepToFreeList, callDestructors>()
            : specializedSweep<Marked, SweepOnly, callDestructors>();
    }

    RELEASE_ASSERT_NOT_REACHED();
    return FreeList();
}
class SetNewlyAllocatedFunctor : public MarkedBlock::VoidFunctor {
public:
    SetNewlyAllocatedFunctor(MarkedBlock* block)
        : m_block(block)
    {
    }

    IterationStatus operator()(JSCell* cell)
    {
        ASSERT(MarkedBlock::blockFor(cell) == m_block);
        m_block->setNewlyAllocated(cell);
        return IterationStatus::Continue;
    }

private:
    MarkedBlock* m_block;
};
void MarkedBlock::stopAllocating(const FreeList& freeList)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
    FreeCell* head = freeList.head;

    if (m_state == Marked) {
        // If the block is in the Marked state then we know that:
        // 1) It was not used for allocation during the previous allocation cycle.
        // 2) It may have dead objects, and we only know them to be dead by the
        //    fact that their mark bits are unset.
        // Hence if the block is Marked we need to leave it Marked.

        ASSERT(!head);
        return;
    }

    ASSERT(m_state == FreeListed);

    // Roll back to a coherent state for Heap introspection. Cells newly
    // allocated from our free list are not currently marked, so we need another
    // way to tell what's live vs dead.

    ASSERT(!m_newlyAllocated);
    m_newlyAllocated = std::make_unique<WTF::Bitmap<atomsPerBlock>>();

    SetNewlyAllocatedFunctor functor(this);
    forEachCell(functor);

    FreeCell* next;
    for (FreeCell* current = head; current; current = next) {
        next = current->next;
        reinterpret_cast<JSCell*>(current)->zap();
        clearNewlyAllocated(current);
    }

    m_state = Marked;
}
void MarkedBlock::clearMarks()
{
#if ENABLE(GGC)
    if (heap()->operationInProgress() == JSC::EdenCollection)
        this->clearMarksWithCollectionType<EdenCollection>();
    else
        this->clearMarksWithCollectionType<FullCollection>();
#else
    this->clearMarksWithCollectionType<FullCollection>();
#endif
}

template <HeapOperation collectionType>
void MarkedBlock::clearMarksWithCollectionType()
{
    ASSERT(collectionType == FullCollection || collectionType == EdenCollection);
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    ASSERT(m_state != New && m_state != FreeListed);
    if (collectionType == FullCollection) {
        m_marks.clearAll();
        // This will become true at the end of the mark phase. We set it now to
        // avoid an extra pass to do so later.
        m_state = Marked;
        return;
    }

    ASSERT(collectionType == EdenCollection);
    // If a block was retired then there's no way an EdenCollection can un-retire it.
    if (m_state != Retired)
        m_state = Marked;
}
void MarkedBlock::lastChanceToFinalize()
{
    m_weakSet.lastChanceToFinalize();

    clearNewlyAllocated();
    clearMarksWithCollectionType<FullCollection>();
    sweep();
}
MarkedBlock::FreeList MarkedBlock::resumeAllocating()
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    ASSERT(m_state == Marked);

    if (!m_newlyAllocated) {
        // We didn't have to create a "newly allocated" bitmap. That means we were already Marked
        // when we last stopped allocation, so return an empty free list and stay in the Marked state.
        return FreeList();
    }

    // Re-create our free list from before stopping allocation.
    return sweep(SweepToFreeList);
}

void MarkedBlock::didRetireBlock(const FreeList& freeList)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
    FreeCell* head = freeList.head;

    // Currently we don't notify the Heap that we're giving up on this block.
    // The Heap might be able to make a better decision about how many bytes should
    // be allocated before the next collection if it knew about this retired block.
    // On the other hand we'll waste at most 10% of our Heap space between FullCollections
    // and only under heavy fragmentation.

    // We need to zap the free list when retiring a block so that we don't try to destroy
    // previously destroyed objects when we re-sweep the block in the future.
    FreeCell* next;
    for (FreeCell* current = head; current; current = next) {
        next = current->next;
        reinterpret_cast<JSCell*>(current)->zap();
    }

    ASSERT(m_state == FreeListed);
    m_state = Retired;
}

} // namespace JSC