/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "MarkedBlock.h"

#include "JSCell.h"
#include "ScopeChain.h"

namespace JSC {
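// A MarkedBlock is a single aligned page allocation carved into fixed-size
// cells. create() obtains fresh pages from the OS, while recycle()
// placement-news a new MarkedBlock header over an existing allocation so the
// same pages can be reused, possibly with a different cell size, without
// returning to the OS.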
MarkedBlock* MarkedBlock::create(Heap* heap, size_t cellSize, bool cellsNeedDestruction)
{
    PageAllocationAligned allocation = PageAllocationAligned::allocate(blockSize, blockSize, OSAllocator::JSGCHeapPages);
    if (!static_cast<bool>(allocation))
        CRASH();
    return new (NotNull, allocation.base()) MarkedBlock(allocation, heap, cellSize, cellsNeedDestruction);
}
MarkedBlock* MarkedBlock::recycle(MarkedBlock* block, Heap* heap, size_t cellSize, bool cellsNeedDestruction)
{
    return new (NotNull, block) MarkedBlock(block->m_allocation, heap, cellSize, cellsNeedDestruction);
}
void MarkedBlock::destroy(MarkedBlock* block)
{
    block->m_allocation.deallocate();
}
MarkedBlock::MarkedBlock(PageAllocationAligned& allocation, Heap* heap, size_t cellSize, bool cellsNeedDestruction)
    : HeapBlock(allocation)
    , m_atomsPerCell((cellSize + atomSize - 1) / atomSize)
    , m_endAtom(atomsPerBlock - m_atomsPerCell + 1)
    , m_cellsNeedDestruction(cellsNeedDestruction)
    , m_state(New) // All cells start out unmarked.
    , m_heap(heap)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
}
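// Destructors are not run when an object dies; they are deferred until the
// block is swept. callDestructor() treats zapped cells as already destructed
// so that a destructor never runs twice.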
inline void MarkedBlock::callDestructor(JSCell* cell)
{
    // A previous eager sweep may already have run cell's destructor.
    if (cell->isZapped())
        return;

#if ENABLE(SIMPLE_HEAP_PROFILING)
    m_heap->m_destroyedTypeCounts.countVPtr(vptr);
#endif

    cell->methodTable()->destroy(cell);
    cell->zap();
}
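// The block state, sweep mode, and destructor requirement are template
// parameters so the per-cell branches below resolve at compile time;
// sweepHelper() instantiates the specialization matching the block's
// current state.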
template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode, bool destructorCallNeeded>
MarkedBlock::FreeList MarkedBlock::specializedSweep()
{
    ASSERT(blockState != Allocated && blockState != FreeListed);
    ASSERT(destructorCallNeeded || sweepMode != SweepOnly);

    // This produces a free list that is ordered in reverse through the block.
    // This is fine, since the allocation code makes no assumptions about the
    // order of the free list.
    FreeCell* head = 0;
    size_t count = 0;
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
        if (blockState == Marked && m_marks.get(i))
            continue;

        JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
        if (blockState == Zapped && !cell->isZapped())
            continue;

        if (destructorCallNeeded && blockState != New)
            callDestructor(cell);

        if (sweepMode == SweepToFreeList) {
            FreeCell* freeCell = reinterpret_cast<FreeCell*>(cell);
            freeCell->next = head;
            head = freeCell;
            ++count;
        }
    }

    m_state = ((sweepMode == SweepToFreeList) ? FreeListed : Zapped);
    return FreeList(head, count * cellSize());
}
MarkedBlock::FreeList MarkedBlock::sweep(SweepMode sweepMode)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    if (sweepMode == SweepOnly && !m_cellsNeedDestruction)
        return FreeList();

    if (m_cellsNeedDestruction)
        return sweepHelper<true>(sweepMode);
    return sweepHelper<false>(sweepMode);
}
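// Dispatches on the block's current state to the matching specializedSweep
// instantiation. New blocks have never been marked or swept; Marked and
// Zapped blocks identify dead cells by unset mark bits and zapped headers,
// respectively.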
template<bool destructorCallNeeded>
MarkedBlock::FreeList MarkedBlock::sweepHelper(SweepMode sweepMode)
{
    switch (m_state) {
    case New:
        ASSERT(sweepMode == SweepToFreeList);
        return specializedSweep<New, SweepToFreeList, destructorCallNeeded>();
    case FreeListed:
        // Happens when a block transitions to fully allocated.
        ASSERT(sweepMode == SweepToFreeList);
        return FreeList();
    case Allocated:
        ASSERT_NOT_REACHED();
        return FreeList();
    case Marked:
        return sweepMode == SweepToFreeList
            ? specializedSweep<Marked, SweepToFreeList, destructorCallNeeded>()
            : specializedSweep<Marked, SweepOnly, destructorCallNeeded>();
    case Zapped:
        return sweepMode == SweepToFreeList
            ? specializedSweep<Zapped, SweepToFreeList, destructorCallNeeded>()
            : specializedSweep<Zapped, SweepOnly, destructorCallNeeded>();
    }

    ASSERT_NOT_REACHED();
    return FreeList();
}
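// Called when the heap canonicalizes cell liveness data: any cells still
// sitting on this block's free list are zapped so heap introspection can
// distinguish them from live, newly allocated cells.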
void MarkedBlock::zapFreeList(const FreeList& freeList)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
    FreeCell* head = freeList.head;

    if (m_state == Marked) {
        // If the block is in the Marked state then we know that:
        // 1) It was not used for allocation during the previous allocation cycle.
        // 2) It may have dead objects, and we only know them to be dead by the
        //    fact that their mark bits are unset.
        // Hence if the block is Marked we need to leave it Marked.

        ASSERT(!head);

        return;
    }

    if (m_state == Zapped) {
        // If the block is in the Zapped state then we know that someone already
        // zapped it for us. This could not have happened during a GC, but might
        // be the result of someone having done a GC scan to perform some operation
        // over all live objects (or all live blocks). It also means that somebody
        // had allocated in this block since the last GC, swept all dead objects
        // onto the free list, left the block in the FreeListed state, then the heap
        // scan happened, and canonicalized the block, leading to all dead objects
        // being zapped. Therefore, it is safe for us to simply do nothing, since
        // dead objects will have 0 in their vtables and live objects will have
        // non-zero vtables, which is consistent with the block being zapped.

        ASSERT(!head);

        return;
    }

    ASSERT(m_state == FreeListed);

    // Roll back to a coherent state for Heap introspection. Cells newly
    // allocated from our free list are not currently marked, so we need another
    // way to tell what's live vs dead. We use zapping for that.

    FreeCell* next;
    for (FreeCell* current = head; current; current = next) {
        next = current->next;
        reinterpret_cast<JSCell*>(current)->zap();
    }

    m_state = Zapped;
}

} // namespace JSC