/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "MarkedBlock.h"

#include "IncrementalSweeper.h"
#include "JSCell.h"
#include "JSDestructibleObject.h"
#include "JSCInlines.h"

namespace JSC {

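// Note: a MarkedBlock is constructed in place inside memory obtained from fastAlignedMalloc,
// aligned to blockSize, so the block that owns any given cell can be recovered by masking the
// low bits of the cell's address (see MarkedBlock::blockFor).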
MarkedBlock* MarkedBlock::create(MarkedAllocator* allocator, size_t capacity, size_t cellSize, bool needsDestruction)
{
    return new (NotNull, fastAlignedMalloc(blockSize, capacity)) MarkedBlock(allocator, capacity, cellSize, needsDestruction);
}

void MarkedBlock::destroy(MarkedBlock* block)
{
    block->~MarkedBlock();
    fastAlignedFree(block);
}

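// m_endAtom is one past the last atom index at which a cell may begin; the sweep loop walks
// candidate cell starts in [firstAtom(), m_endAtom) in steps of m_atomsPerCell.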
MarkedBlock::MarkedBlock(MarkedAllocator* allocator, size_t capacity, size_t cellSize, bool needsDestruction)
    : DoublyLinkedListNode<MarkedBlock>()
    , m_atomsPerCell((cellSize + atomSize - 1) / atomSize)
    , m_endAtom((allocator->cellSize() ? atomsPerBlock - m_atomsPerCell : firstAtom()) + 1)
    , m_capacity(capacity)
    , m_needsDestruction(needsDestruction)
    , m_allocator(allocator)
    , m_state(New) // All cells start out unmarked.
    , m_weakSet(allocator->heap()->vm(), *this)
{
    ASSERT(allocator);
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
}

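// Runs the C++ destructor for a single cell. Cells whose Structure is immortal can still be
// destroyed through their Structure's method table; everything else that needs destruction is
// expected to be a JSDestructibleObject, whose ClassInfo is reachable without the Structure.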
inline void MarkedBlock::callDestructor(JSCell* cell)
{
    // A previous eager sweep may already have run cell's destructor.
    if (cell->isZapped())
        return;

    ASSERT(cell->structureID());
    if (cell->inlineTypeFlags() & StructureIsImmortal)
        cell->structure(*vm())->classInfo()->methodTable.destroy(cell);
    else
        jsCast<JSDestructibleObject*>(cell)->classInfo()->methodTable.destroy(cell);
    cell->zap();
}

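// The sweep is specialized on (block state, sweep mode, destructor-ness) so that these checks
// resolve at compile time and the hot per-cell loop carries no redundant branches.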
template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode, bool callDestructors>
MarkedBlock::FreeList MarkedBlock::specializedSweep()
{
    ASSERT(blockState != Allocated && blockState != FreeListed);
    ASSERT(!(!callDestructors && sweepMode == SweepOnly));

    SamplingRegion samplingRegion((!callDestructors && blockState != New) ? "Calling destructors" : "sweeping");

    // This produces a free list that is ordered in reverse through the block.
    // This is fine, since the allocation code makes no assumptions about the
    // order of the free list.
    FreeCell* head = nullptr;
    size_t count = 0;
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
        if (blockState == Marked && (m_marks.get(i) || (m_newlyAllocated && m_newlyAllocated->get(i))))
            continue;

        JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);

        if (callDestructors && blockState != New)
            callDestructor(cell);

        if (sweepMode == SweepToFreeList) {
            FreeCell* freeCell = reinterpret_cast<FreeCell*>(cell);
            freeCell->next = head;
            head = freeCell;
            ++count;
        }
    }

    // We only want to discard the newlyAllocated bits if we're creating a FreeList,
    // otherwise we would lose information on what's currently alive.
    if (sweepMode == SweepToFreeList && m_newlyAllocated)
        m_newlyAllocated = nullptr;

    m_state = ((sweepMode == SweepToFreeList) ? FreeListed : Marked);
    return FreeList(head, count * cellSize());
}

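// A SweepOnly pass exists to run destructors, so a block whose cells never need destruction
// can return an empty free list without touching its cells.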
MarkedBlock::FreeList MarkedBlock::sweep(SweepMode sweepMode)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    m_weakSet.sweep();

    if (sweepMode == SweepOnly && !m_needsDestruction)
        return FreeList();

    if (m_needsDestruction)
        return sweepHelper<true>(sweepMode);
    return sweepHelper<false>(sweepMode);
}

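// Dispatches on the block's current state to pick a fully specialized sweep; Allocated and
// Retired blocks are never swept through this path.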
template<bool callDestructors>
MarkedBlock::FreeList MarkedBlock::sweepHelper(SweepMode sweepMode)
{
    switch (m_state) {
    case New:
        ASSERT(sweepMode == SweepToFreeList);
        return specializedSweep<New, SweepToFreeList, callDestructors>();
    case FreeListed:
        // Happens when a block transitions to fully allocated.
        ASSERT(sweepMode == SweepToFreeList);
        return FreeList();
    case Retired:
    case Allocated:
        RELEASE_ASSERT_NOT_REACHED();
        return FreeList();
    case Marked:
        return sweepMode == SweepToFreeList
            ? specializedSweep<Marked, SweepToFreeList, callDestructors>()
            : specializedSweep<Marked, SweepOnly, callDestructors>();
    }

    RELEASE_ASSERT_NOT_REACHED();
    return FreeList();
}

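// Used by stopAllocating(): marks every cell in the block as newly allocated, after which the
// cells still sitting on the free list get their bit cleared again, leaving only live cells flagged.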
class SetNewlyAllocatedFunctor : public MarkedBlock::VoidFunctor {
public:
    SetNewlyAllocatedFunctor(MarkedBlock* block)
        : m_block(block)
    {
    }

    IterationStatus operator()(JSCell* cell)
    {
        ASSERT(MarkedBlock::blockFor(cell) == m_block);
        m_block->setNewlyAllocated(cell);
        return IterationStatus::Continue;
    }

private:
    MarkedBlock* m_block;
};

void MarkedBlock::stopAllocating(const FreeList& freeList)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
    FreeCell* head = freeList.head;

    if (m_state == Marked) {
        // If the block is in the Marked state then we know that:
        // 1) It was not used for allocation during the previous allocation cycle.
        // 2) It may have dead objects, and we only know them to be dead by the
        //    fact that their mark bits are unset.
        // Hence if the block is Marked we need to leave it Marked.

        ASSERT(!head);
        return;
    }

    ASSERT(m_state == FreeListed);

    // Roll back to a coherent state for Heap introspection. Cells newly
    // allocated from our free list are not currently marked, so we need another
    // way to tell what's live vs dead.

    ASSERT(!m_newlyAllocated);
    m_newlyAllocated = std::make_unique<WTF::Bitmap<atomsPerBlock>>();

    SetNewlyAllocatedFunctor functor(this);
    forEachCell(functor);

    FreeCell* next;
    for (FreeCell* current = head; current; current = next) {
        next = current->next;
        reinterpret_cast<JSCell*>(current)->zap();
        clearNewlyAllocated(current);
    }

    m_state = Marked;
}

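// For an Eden (generational) collection the older generation's mark bits must survive, so only
// a full collection actually clears them; the Eden path just updates the block state.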
void MarkedBlock::clearMarks()
{
#if ENABLE(GGC)
    if (heap()->operationInProgress() == JSC::EdenCollection)
        this->clearMarksWithCollectionType<EdenCollection>();
    else
        this->clearMarksWithCollectionType<FullCollection>();
#else
    this->clearMarksWithCollectionType<FullCollection>();
#endif
}

template <HeapOperation collectionType>
void MarkedBlock::clearMarksWithCollectionType()
{
    ASSERT(collectionType == FullCollection || collectionType == EdenCollection);
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    ASSERT(m_state != New && m_state != FreeListed);
    if (collectionType == FullCollection) {
        m_marks.clearAll();
        // This will become true at the end of the mark phase. We set it now to
        // avoid an extra pass to do so later.
        m_state = Marked;
        return;
    }

    ASSERT(collectionType == EdenCollection);
    // If a block was retired then there's no way an EdenCollection can un-retire it.
    if (m_state != Retired)
        m_state = Marked;
}

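// Called when the block is about to go away for good: finalize weak references, then clear all
// marks and sweep so that every remaining destructor runs.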
void MarkedBlock::lastChanceToFinalize()
{
    m_weakSet.lastChanceToFinalize();

    clearNewlyAllocated();
    clearMarksWithCollectionType<FullCollection>();
    sweep();
}

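// Inverse of stopAllocating(): if stopAllocating() had to build a newly-allocated bitmap, we
// rebuild the free list by re-sweeping; otherwise the block simply stays Marked.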
MarkedBlock::FreeList MarkedBlock::resumeAllocating()
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    ASSERT(m_state == Marked);

    if (!m_newlyAllocated) {
        // We didn't have to create a "newly allocated" bitmap. That means we were already Marked
        // when we last stopped allocation, so return an empty free list and stay in the Marked state.
        return FreeList();
    }

    // Re-create our free list from before stopping allocation.
    return sweep(SweepToFreeList);
}

void MarkedBlock::didRetireBlock(const FreeList& freeList)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
    FreeCell* head = freeList.head;

    // Currently we don't notify the Heap that we're giving up on this block.
    // The Heap might be able to make a better decision about how many bytes should
    // be allocated before the next collection if it knew about this retired block.
    // On the other hand we'll waste at most 10% of our Heap space between FullCollections
    // and only under heavy fragmentation.

    // We need to zap the free list when retiring a block so that we don't try to destroy
    // previously destroyed objects when we re-sweep the block in the future.
    FreeCell* next;
    for (FreeCell* current = head; current; current = next) {
        next = current->next;
        reinterpret_cast<JSCell*>(current)->zap();
    }

    ASSERT(m_state == FreeListed);
    m_state = Retired;
}

} // namespace JSC