/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "MarkedBlock.h"

#include "IncrementalSweeper.h"
#include "JSCell.h"
#include "JSDestructibleObject.h"
#include "Operations.h"

namespace JSC {
MarkedBlock* MarkedBlock::create(DeadBlock* block, MarkedAllocator* allocator, size_t cellSize, DestructorType destructorType)
{
    ASSERT(reinterpret_cast<size_t>(block) == (reinterpret_cast<size_t>(block) & blockMask));
    Region* region = block->region();
    return new (NotNull, block) MarkedBlock(region, allocator, cellSize, destructorType);
}
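// The block's geometry is fixed at construction: m_atomsPerCell rounds the
// cell size up to whole atoms, and m_endAtom is the last atom index at which a
// full cell can still begin. When the allocator reports no fixed cell size,
// this is an oversize block, so the bound comes from the Region's actual byte
// size rather than from atomsPerBlock.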
MarkedBlock::MarkedBlock(Region* region, MarkedAllocator* allocator, size_t cellSize, DestructorType destructorType)
    : HeapBlock<MarkedBlock>(region)
    , m_atomsPerCell((cellSize + atomSize - 1) / atomSize)
    , m_endAtom((allocator->cellSize() ? atomsPerBlock : region->blockSize() / atomSize) - m_atomsPerCell + 1)
    , m_destructorType(destructorType)
    , m_allocator(allocator)
    , m_state(New) // All cells start out unmarked.
    , m_weakSet(allocator->heap()->vm())
{
    ASSERT(allocator);
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
}
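// callDestructor runs a dead cell's C++ destructor at most once: cells that a
// previous eager sweep already zapped are skipped, and the cell is zapped
// afterward so a later sweep of the same block will not destruct it again.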
inline void MarkedBlock::callDestructor(JSCell* cell)
{
    // A previous eager sweep may already have run cell's destructor.
    if (cell->isZapped())
        return;

#if ENABLE(SIMPLE_HEAP_PROFILING)
    m_heap->m_destroyedTypeCounts.countVPtr(vptr);
#endif

    cell->methodTableForDestruction()->destroy(cell);
    cell->zap();
}
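// specializedSweep is templated on the block's current state, the sweep mode,
// and the destructor type so that each combination compiles to its own loop:
// every branch below that tests only template parameters is resolved at
// compile time, keeping the per-cell path free of those checks.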
template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode, MarkedBlock::DestructorType dtorType>
MarkedBlock::FreeList MarkedBlock::specializedSweep()
{
    ASSERT(blockState != Allocated && blockState != FreeListed);
    ASSERT(!(dtorType == MarkedBlock::None && sweepMode == SweepOnly));

    // This produces a free list that is ordered in reverse through the block.
    // This is fine, since the allocation code makes no assumptions about the
    // order of the free list.
    FreeCell* head = 0;
    size_t count = 0;
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
        if (blockState == Marked && (m_marks.get(i) || (m_newlyAllocated && m_newlyAllocated->get(i))))
            continue;

        JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);

        if (dtorType != MarkedBlock::None && blockState != New)
            callDestructor(cell);

        if (sweepMode == SweepToFreeList) {
            FreeCell* freeCell = reinterpret_cast<FreeCell*>(cell);
            freeCell->next = head;
            head = freeCell;
            ++count;
        }
    }

    // We only want to discard the newlyAllocated bits if we're creating a FreeList,
    // otherwise we would lose information on what's currently alive.
    if (sweepMode == SweepToFreeList && m_newlyAllocated)
        m_newlyAllocated.clear();

    m_state = ((sweepMode == SweepToFreeList) ? FreeListed : Marked);
    return FreeList(head, count * cellSize());
}
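// sweep() is the runtime entry point. It dispatches on this block's destructor
// type so that a sweepHelper (and, beneath it, a specializedSweep) is
// instantiated per case; a destructor-free block that is only being swept, not
// refilled, has nothing to do at all.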
MarkedBlock::FreeList MarkedBlock::sweep(SweepMode sweepMode)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    m_weakSet.sweep();

    if (sweepMode == SweepOnly && m_destructorType == MarkedBlock::None)
        return FreeList();

    if (m_destructorType == MarkedBlock::ImmortalStructure)
        return sweepHelper<MarkedBlock::ImmortalStructure>(sweepMode);
    if (m_destructorType == MarkedBlock::Normal)
        return sweepHelper<MarkedBlock::Normal>(sweepMode);
    return sweepHelper<MarkedBlock::None>(sweepMode);
}
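// sweepHelper maps the block's life cycle onto the sweep: a New block has
// never held live objects, so no destructors can run; a FreeListed block is
// already swept and simply yields no new cells; sweeping an Allocated block is
// a logic error; a Marked block is the common case, with liveness read from
// the mark bits (and from m_newlyAllocated, when present).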
template<MarkedBlock::DestructorType dtorType>
MarkedBlock::FreeList MarkedBlock::sweepHelper(SweepMode sweepMode)
{
    switch (m_state) {
    case New:
        ASSERT(sweepMode == SweepToFreeList);
        return specializedSweep<New, SweepToFreeList, dtorType>();
    case FreeListed:
        // Happens when a block transitions to fully allocated.
        ASSERT(sweepMode == SweepToFreeList);
        return FreeList();
    case Allocated:
        RELEASE_ASSERT_NOT_REACHED();
        return FreeList();
    case Marked:
        return sweepMode == SweepToFreeList
            ? specializedSweep<Marked, SweepToFreeList, dtorType>()
            : specializedSweep<Marked, SweepOnly, dtorType>();
    }

    RELEASE_ASSERT_NOT_REACHED();
    return FreeList();
}
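// Helper for canonicalizeCellLivenessData below: sets the newlyAllocated bit
// for each cell it visits.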
class SetNewlyAllocatedFunctor : public MarkedBlock::VoidFunctor {
public:
    SetNewlyAllocatedFunctor(MarkedBlock* block)
        : m_block(block)
    {
    }

    void operator()(JSCell* cell)
    {
        ASSERT(MarkedBlock::blockFor(cell) == m_block);
        m_block->setNewlyAllocated(cell);
    }

private:
    MarkedBlock* m_block;
};
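// Conceptually this is a two-pass computation: first every cell in the block
// is marked newly allocated, then each cell still on the free list is zapped
// and has its bit cleared. What remains set covers the cells that were live at
// the last sweep plus those allocated from the free list since, which together
// are exactly the cells that must be treated as live.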
void MarkedBlock::canonicalizeCellLivenessData(const FreeList& freeList)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
    FreeCell* head = freeList.head;

    if (m_state == Marked) {
        // If the block is in the Marked state then we know that:
        // 1) It was not used for allocation during the previous allocation cycle.
        // 2) It may have dead objects, and we only know them to be dead by the
        //    fact that their mark bits are unset.
        // Hence if the block is Marked we need to leave it Marked.

        ASSERT(!head);
        return;
    }

    ASSERT(m_state == FreeListed);

    // Roll back to a coherent state for Heap introspection. Cells newly
    // allocated from our free list are not currently marked, so we need another
    // way to tell what's live vs dead.
    ASSERT(!m_newlyAllocated);
    m_newlyAllocated = adoptPtr(new WTF::Bitmap<atomsPerBlock>());

    SetNewlyAllocatedFunctor functor(this);
    forEachCell(functor);

    FreeCell* next;
    for (FreeCell* current = head; current; current = next) {
        next = current->next;
        reinterpret_cast<JSCell*>(current)->zap();
        clearNewlyAllocated(current);
    }

    m_state = Marked;
}

} // namespace JSC