/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "MarkedBlock.h"

#include "DelayedReleaseScope.h"
#include "IncrementalSweeper.h"
#include "JSCell.h"
#include "JSDestructibleObject.h"
#include "JSCInlines.h"

namespace JSC {
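
// Overview: a MarkedBlock moves through the states New, FreeListed, Allocated,
// Marked and Retired. The sweep, allocation and clear-marks paths below assert
// the transitions they expect and log them with HEAP_LOG_BLOCK_STATE_TRANSITION.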

MarkedBlock* MarkedBlock::create(DeadBlock* block, MarkedAllocator* allocator, size_t cellSize, DestructorType destructorType)
{
    ASSERT(reinterpret_cast<size_t>(block) == (reinterpret_cast<size_t>(block) & blockMask));
    Region* region = block->region();
    return new (NotNull, block) MarkedBlock(region, allocator, cellSize, destructorType);
}

MarkedBlock::MarkedBlock(Region* region, MarkedAllocator* allocator, size_t cellSize, DestructorType destructorType)
    : HeapBlock<MarkedBlock>(region)
    , m_atomsPerCell((cellSize + atomSize - 1) / atomSize)
    , m_endAtom((allocator->cellSize() ? atomsPerBlock : region->blockSize() / atomSize) - m_atomsPerCell + 1)
    , m_destructorType(destructorType)
    , m_allocator(allocator)
    , m_state(New) // All cells start out unmarked.
    , m_weakSet(allocator->heap()->vm())
{
    ASSERT(allocator);
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
}
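
// Blocks with the Normal destructor type hold JSDestructibleObjects, so the
// destructor can be found directly from the object's ClassInfo. Other
// destructor types locate the ClassInfo through the cell's Structure instead.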

template<MarkedBlock::DestructorType dtorType>
inline void MarkedBlock::callDestructor(JSCell* cell)
{
    // A previous eager sweep may already have run cell's destructor.
    if (cell->isZapped())
        return;

    if (dtorType == MarkedBlock::Normal)
        jsCast<JSDestructibleObject*>(cell)->classInfo()->methodTable.destroy(cell);
    else
        cell->structure(*vm())->classInfo()->methodTable.destroy(cell);
    cell->zap();
}
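
// The sweep loop is specialized on the block's state, the sweep mode and the
// destructor type, so each instantiation folds these checks into compile-time
// constants and only does the work its combination actually requires.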

template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode, MarkedBlock::DestructorType dtorType>
MarkedBlock::FreeList MarkedBlock::specializedSweep()
{
    ASSERT(blockState != Allocated && blockState != FreeListed);
    ASSERT(!(dtorType == MarkedBlock::None && sweepMode == SweepOnly));

    // This produces a free list that is ordered in reverse through the block.
    // This is fine, since the allocation code makes no assumptions about the
    // order of the free list.
    FreeCell* head = 0;
    size_t count = 0;
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
        if (blockState == Marked && (m_marks.get(i) || (m_newlyAllocated && m_newlyAllocated->get(i))))
            continue;

        JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);

        if (dtorType != MarkedBlock::None && blockState != New)
            callDestructor<dtorType>(cell);

        if (sweepMode == SweepToFreeList) {
            FreeCell* freeCell = reinterpret_cast<FreeCell*>(cell);
            freeCell->next = head;
            head = freeCell;
            ++count;
        }
    }

    // We only want to discard the newlyAllocated bits if we're creating a FreeList,
    // otherwise we would lose information on what's currently alive.
    if (sweepMode == SweepToFreeList && m_newlyAllocated)
        m_newlyAllocated.clear();

    m_state = ((sweepMode == SweepToFreeList) ? FreeListed : Marked);
    return FreeList(head, count * cellSize());
}
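
// sweep() picks the sweepHelper instantiation that matches this block's
// destructor type; a destructor-free block swept without building a free list
// has nothing left to do.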

MarkedBlock::FreeList MarkedBlock::sweep(SweepMode sweepMode)
{
    ASSERT(DelayedReleaseScope::isInEffectFor(heap()->m_objectSpace));
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    m_weakSet.sweep();

    if (sweepMode == SweepOnly && m_destructorType == MarkedBlock::None)
        return FreeList();

    if (m_destructorType == MarkedBlock::ImmortalStructure)
        return sweepHelper<MarkedBlock::ImmortalStructure>(sweepMode);
    if (m_destructorType == MarkedBlock::Normal)
        return sweepHelper<MarkedBlock::Normal>(sweepMode);
    return sweepHelper<MarkedBlock::None>(sweepMode);
}

template<MarkedBlock::DestructorType dtorType>
MarkedBlock::FreeList MarkedBlock::sweepHelper(SweepMode sweepMode)
{
    switch (m_state) {
    case New:
        ASSERT(sweepMode == SweepToFreeList);
        return specializedSweep<New, SweepToFreeList, dtorType>();
    case FreeListed:
        // Happens when a block transitions to fully allocated.
        ASSERT(sweepMode == SweepToFreeList);
        return FreeList();
    case Retired:
    case Allocated:
        RELEASE_ASSERT_NOT_REACHED();
        return FreeList();
    case Marked:
        return sweepMode == SweepToFreeList
            ? specializedSweep<Marked, SweepToFreeList, dtorType>()
            : specializedSweep<Marked, SweepOnly, dtorType>();
    }

    RELEASE_ASSERT_NOT_REACHED();
    return FreeList();
}
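
// Used by stopAllocating() below: when a FreeListed block is rolled back to
// Marked, every live cell is recorded in the newlyAllocated bitmap so the Heap
// can still tell live cells from dead ones without relying on mark bits.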

class SetNewlyAllocatedFunctor : public MarkedBlock::VoidFunctor {
public:
    SetNewlyAllocatedFunctor(MarkedBlock* block)
        : m_block(block)
    {
    }

    void operator()(JSCell* cell)
    {
        ASSERT(MarkedBlock::blockFor(cell) == m_block);
        m_block->setNewlyAllocated(cell);
    }

private:
    MarkedBlock* m_block;
};

void MarkedBlock::stopAllocating(const FreeList& freeList)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
    FreeCell* head = freeList.head;

    if (m_state == Marked) {
        // If the block is in the Marked state then we know that:
        // 1) It was not used for allocation during the previous allocation cycle.
        // 2) It may have dead objects, and we only know them to be dead by the
        //    fact that their mark bits are unset.
        // Hence if the block is Marked we need to leave it Marked.
        ASSERT(!head);
        return;
    }

    ASSERT(m_state == FreeListed);

    // Roll back to a coherent state for Heap introspection. Cells newly
    // allocated from our free list are not currently marked, so we need another
    // way to tell what's live vs dead.

    ASSERT(!m_newlyAllocated);
    m_newlyAllocated = adoptPtr(new WTF::Bitmap<atomsPerBlock>());

    SetNewlyAllocatedFunctor functor(this);
    forEachCell(functor);

    FreeCell* next;
    for (FreeCell* current = head; current; current = next) {
        next = current->next;
        reinterpret_cast<JSCell*>(current)->zap();
        clearNewlyAllocated(current);
    }

    m_state = Marked;
}

void MarkedBlock::clearMarks()
{
#if ENABLE(GGC)
    if (heap()->operationInProgress() == JSC::EdenCollection)
        this->clearMarksWithCollectionType<EdenCollection>();
    else
        this->clearMarksWithCollectionType<FullCollection>();
#else
    this->clearMarksWithCollectionType<FullCollection>();
#endif
}

void MarkedBlock::clearRememberedSet()
{
    m_rememberedSet.clearAll();
}
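
// A FullCollection clears every mark and remembered-set bit and eagerly moves
// the block to Marked; an EdenCollection keeps the existing mark bits, only
// updates the state, and never un-retires a Retired block.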

template <HeapOperation collectionType>
void MarkedBlock::clearMarksWithCollectionType()
{
    ASSERT(collectionType == FullCollection || collectionType == EdenCollection);
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    ASSERT(m_state != New && m_state != FreeListed);
    if (collectionType == FullCollection) {
        m_marks.clearAll();
        m_rememberedSet.clearAll();
        // This will become true at the end of the mark phase. We set it now to
        // avoid an extra pass to do so later.
        m_state = Marked;
        return;
    }

    ASSERT(collectionType == EdenCollection);
    // If a block was retired then there's no way an EdenCollection can un-retire it.
    if (m_state != Retired)
        m_state = Marked;
}

void MarkedBlock::lastChanceToFinalize()
{
    m_weakSet.lastChanceToFinalize();

    clearNewlyAllocated();
    clearMarksWithCollectionType<FullCollection>();
    sweep();
}

MarkedBlock::FreeList MarkedBlock::resumeAllocating()
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    ASSERT(m_state == Marked);

    if (!m_newlyAllocated) {
        // We didn't have to create a "newly allocated" bitmap. That means we were already Marked
        // when we last stopped allocation, so return an empty free list and stay in the Marked state.
        return FreeList();
    }

    // Re-create our free list from before stopping allocation.
    return sweep(SweepToFreeList);
}

void MarkedBlock::didRetireBlock(const FreeList& freeList)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
    FreeCell* head = freeList.head;

    // Currently we don't notify the Heap that we're giving up on this block.
    // The Heap might be able to make a better decision about how many bytes should
    // be allocated before the next collection if it knew about this retired block.
    // On the other hand we'll waste at most 10% of our Heap space between FullCollections
    // and only under heavy fragmentation.

    // We need to zap the free list when retiring a block so that we don't try to destroy
    // previously destroyed objects when we re-sweep the block in the future.
    FreeCell* next;
    for (FreeCell* current = head; current; current = next) {
        next = current->next;
        reinterpret_cast<JSCell*>(current)->zap();
    }

    ASSERT(m_state == FreeListed);
    m_state = Retired;
}

} // namespace JSC