/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

26#include "config.h"
27#include "MarkedBlock.h"
28
93a37866 29#include "IncrementalSweeper.h"
14957cd0 30#include "JSCell.h"
93a37866
A
31#include "JSDestructibleObject.h"
32#include "Operations.h"
14957cd0
A
33
34namespace JSC {
35
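// A MarkedBlock is constructed in place on top of a recycled DeadBlock: the
// placement new below reuses the DeadBlock's memory rather than allocating anew.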
MarkedBlock* MarkedBlock::create(DeadBlock* block, MarkedAllocator* allocator, size_t cellSize, DestructorType destructorType)
{
    ASSERT(reinterpret_cast<size_t>(block) == (reinterpret_cast<size_t>(block) & blockMask));
    Region* region = block->region();
    return new (NotNull, block) MarkedBlock(region, allocator, cellSize, destructorType);
}

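// Note: m_endAtom is computed from the Region's actual size when the allocator
// reports a cellSize of 0, i.e. when the block's capacity is not the standard
// atomsPerBlock.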
MarkedBlock::MarkedBlock(Region* region, MarkedAllocator* allocator, size_t cellSize, DestructorType destructorType)
    : HeapBlock<MarkedBlock>(region)
    , m_atomsPerCell((cellSize + atomSize - 1) / atomSize)
    , m_endAtom((allocator->cellSize() ? atomsPerBlock : region->blockSize() / atomSize) - m_atomsPerCell + 1)
    , m_destructorType(destructorType)
    , m_allocator(allocator)
    , m_state(New) // All cells start out unmarked.
    , m_weakSet(allocator->heap()->vm())
{
    ASSERT(allocator);
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
}

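// Destruction is idempotent: zap() clears the cell's Structure pointer, so a
// zapped cell is recognizably already-destructed and is skipped on later sweeps.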
inline void MarkedBlock::callDestructor(JSCell* cell)
{
    // A previous eager sweep may already have run cell's destructor.
    if (cell->isZapped())
        return;

#if ENABLE(SIMPLE_HEAP_PROFILING)
    // Compiled out by default; 'vptr' is not defined in this scope, so this
    // profiling hook would need the cell's vptr captured before destruction.
    m_heap->m_destroyedTypeCounts.countVPtr(vptr);
#endif

    cell->methodTableForDestruction()->destroy(cell);
    cell->zap();
}

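// Sweeping is specialized on (block state, sweep mode, destructor type) as
// compile-time template parameters, so the per-cell loop in each instantiation
// is free of those branches.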
template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode, MarkedBlock::DestructorType dtorType>
MarkedBlock::FreeList MarkedBlock::specializedSweep()
{
    ASSERT(blockState != Allocated && blockState != FreeListed);
    ASSERT(!(dtorType == MarkedBlock::None && sweepMode == SweepOnly));

    // This produces a free list that is ordered in reverse through the block.
    // This is fine, since the allocation code makes no assumptions about the
    // order of the free list.
    FreeCell* head = 0;
    size_t count = 0;
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
        if (blockState == Marked && (m_marks.get(i) || (m_newlyAllocated && m_newlyAllocated->get(i))))
            continue;

        JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);

        if (dtorType != MarkedBlock::None && blockState != New)
            callDestructor(cell);

        if (sweepMode == SweepToFreeList) {
            FreeCell* freeCell = reinterpret_cast<FreeCell*>(cell);
            freeCell->next = head;
            head = freeCell;
            ++count;
        }
    }

    // We only want to discard the newlyAllocated bits if we're creating a FreeList,
    // otherwise we would lose information on what's currently alive.
    if (sweepMode == SweepToFreeList && m_newlyAllocated)
        m_newlyAllocated.clear();

    m_state = ((sweepMode == SweepToFreeList) ? FreeListed : Marked);
    return FreeList(head, count * cellSize());
}

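// Dispatch on the destructor type so that sweepHelper<> is instantiated once per
// DestructorType. A SweepOnly pass over a destructor-free block has nothing to
// do beyond sweeping the weak set.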
MarkedBlock::FreeList MarkedBlock::sweep(SweepMode sweepMode)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    m_weakSet.sweep();

    if (sweepMode == SweepOnly && m_destructorType == MarkedBlock::None)
        return FreeList();

    if (m_destructorType == MarkedBlock::ImmortalStructure)
        return sweepHelper<MarkedBlock::ImmortalStructure>(sweepMode);
    if (m_destructorType == MarkedBlock::Normal)
        return sweepHelper<MarkedBlock::Normal>(sweepMode);
    return sweepHelper<MarkedBlock::None>(sweepMode);
}

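// Maps the block's lifecycle state to the right specializedSweep instantiation.
// New blocks have never been marked, so they sweep without consulting mark bits
// and without running destructors; Allocated blocks must never be swept.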
template<MarkedBlock::DestructorType dtorType>
MarkedBlock::FreeList MarkedBlock::sweepHelper(SweepMode sweepMode)
{
    switch (m_state) {
    case New:
        ASSERT(sweepMode == SweepToFreeList);
        return specializedSweep<New, SweepToFreeList, dtorType>();
    case FreeListed:
        // Happens when a block transitions to fully allocated.
        ASSERT(sweepMode == SweepToFreeList);
        return FreeList();
    case Allocated:
        RELEASE_ASSERT_NOT_REACHED();
        return FreeList();
    case Marked:
        return sweepMode == SweepToFreeList
            ? specializedSweep<Marked, SweepToFreeList, dtorType>()
            : specializedSweep<Marked, SweepOnly, dtorType>();
    }

    RELEASE_ASSERT_NOT_REACHED();
    return FreeList();
}

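// A functor for MarkedBlock::forEachCell, used below to record every cell in
// the block in the newlyAllocated bitmap.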
class SetNewlyAllocatedFunctor : public MarkedBlock::VoidFunctor {
public:
    SetNewlyAllocatedFunctor(MarkedBlock* block)
        : m_block(block)
    {
    }

    void operator()(JSCell* cell)
    {
        ASSERT(MarkedBlock::blockFor(cell) == m_block);
        m_block->setNewlyAllocated(cell);
    }

private:
    MarkedBlock* m_block;
};

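// Called when the heap needs coherent liveness data while this block still has
// an outstanding free list: every cell is first recorded as newly allocated,
// then each cell still on the free list is zapped and cleared from the bitmap.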
void MarkedBlock::canonicalizeCellLivenessData(const FreeList& freeList)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
    FreeCell* head = freeList.head;

    if (m_state == Marked) {
        // If the block is in the Marked state then we know that:
        // 1) It was not used for allocation during the previous allocation cycle.
        // 2) It may have dead objects, and we only know them to be dead by the
        //    fact that their mark bits are unset.
        // Hence if the block is Marked we need to leave it Marked.

        ASSERT(!head);
        return;
    }

    ASSERT(m_state == FreeListed);

    // Roll back to a coherent state for Heap introspection. Cells newly
    // allocated from our free list are not currently marked, so we need another
    // way to tell what's live vs dead.

    ASSERT(!m_newlyAllocated);
    m_newlyAllocated = adoptPtr(new WTF::Bitmap<atomsPerBlock>());

    SetNewlyAllocatedFunctor functor(this);
    forEachCell(functor);

    FreeCell* next;
    for (FreeCell* current = head; current; current = next) {
        next = current->next;
        reinterpret_cast<JSCell*>(current)->zap();
        clearNewlyAllocated(current);
    }

    m_state = Marked;
}

} // namespace JSC