/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "MarkedBlock.h"

#include "DelayedReleaseScope.h"
#include "IncrementalSweeper.h"
#include "JSCell.h"
#include "JSDestructibleObject.h"
#include "JSCInlines.h"

namespace JSC {

MarkedBlock* MarkedBlock::create(DeadBlock* block, MarkedAllocator* allocator, size_t cellSize, DestructorType destructorType)
{
    ASSERT(reinterpret_cast<size_t>(block) == (reinterpret_cast<size_t>(block) & blockMask));
    Region* region = block->region();
    return new (NotNull, block) MarkedBlock(region, allocator, cellSize, destructorType);
}

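// Note on m_endAtom below: an allocator cellSize() of 0 appears to denote an
// oversize block, so the usable range is computed from the region's actual
// size rather than from the fixed atomsPerBlock.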
MarkedBlock::MarkedBlock(Region* region, MarkedAllocator* allocator, size_t cellSize, DestructorType destructorType)
    : HeapBlock<MarkedBlock>(region)
    , m_atomsPerCell((cellSize + atomSize - 1) / atomSize)
    , m_endAtom((allocator->cellSize() ? atomsPerBlock : region->blockSize() / atomSize) - m_atomsPerCell + 1)
    , m_destructorType(destructorType)
    , m_allocator(allocator)
    , m_state(New) // All cells start out unmarked.
    , m_weakSet(allocator->heap()->vm())
{
    ASSERT(allocator);
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
}

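// The two branches below differ in how the cell's ClassInfo is found:
// Normal-destructor cells are JSDestructibleObjects, which cache their
// ClassInfo on the cell, so destruction never needs to read the Structure
// (which may itself already have been swept). ImmortalStructure cells have
// Structures guaranteed to outlive them, so going through the Structure is safe.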
template<MarkedBlock::DestructorType dtorType>
inline void MarkedBlock::callDestructor(JSCell* cell)
{
    // A previous eager sweep may already have run cell's destructor.
    if (cell->isZapped())
        return;

    if (dtorType == MarkedBlock::Normal)
        jsCast<JSDestructibleObject*>(cell)->classInfo()->methodTable.destroy(cell);
    else
        cell->structure(*vm())->classInfo()->methodTable.destroy(cell);
    cell->zap();
}

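// The block state, sweep mode, and destructor type are template parameters so
// that each combination compiles to its own specialized loop, with the
// per-cell branches below folded away at compile time.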
template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode, MarkedBlock::DestructorType dtorType>
MarkedBlock::FreeList MarkedBlock::specializedSweep()
{
    ASSERT(blockState != Allocated && blockState != FreeListed);
    ASSERT(!(dtorType == MarkedBlock::None && sweepMode == SweepOnly));

    // This produces a free list that is ordered in reverse through the block.
    // This is fine, since the allocation code makes no assumptions about the
    // order of the free list.
    FreeCell* head = 0;
    size_t count = 0;
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
        if (blockState == Marked && (m_marks.get(i) || (m_newlyAllocated && m_newlyAllocated->get(i))))
            continue;

        JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);

        if (dtorType != MarkedBlock::None && blockState != New)
            callDestructor<dtorType>(cell);

        if (sweepMode == SweepToFreeList) {
            FreeCell* freeCell = reinterpret_cast<FreeCell*>(cell);
            freeCell->next = head;
            head = freeCell;
            ++count;
        }
    }

    // We only want to discard the newlyAllocated bits if we're creating a FreeList,
    // otherwise we would lose information on what's currently alive.
    if (sweepMode == SweepToFreeList && m_newlyAllocated)
        m_newlyAllocated.clear();

    m_state = ((sweepMode == SweepToFreeList) ? FreeListed : Marked);
    return FreeList(head, count * cellSize());
}

MarkedBlock::FreeList MarkedBlock::sweep(SweepMode sweepMode)
{
    ASSERT(DelayedReleaseScope::isInEffectFor(heap()->m_objectSpace));
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    m_weakSet.sweep();

    if (sweepMode == SweepOnly && m_destructorType == MarkedBlock::None)
        return FreeList();

    if (m_destructorType == MarkedBlock::ImmortalStructure)
        return sweepHelper<MarkedBlock::ImmortalStructure>(sweepMode);
    if (m_destructorType == MarkedBlock::Normal)
        return sweepHelper<MarkedBlock::Normal>(sweepMode);
    return sweepHelper<MarkedBlock::None>(sweepMode);
}

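// sweepHelper() dispatches on the block's current state. A New block has never
// had anything allocated in it, which is why specializedSweep() skips
// destructors when blockState == New.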
template<MarkedBlock::DestructorType dtorType>
MarkedBlock::FreeList MarkedBlock::sweepHelper(SweepMode sweepMode)
{
    switch (m_state) {
    case New:
        ASSERT(sweepMode == SweepToFreeList);
        return specializedSweep<New, SweepToFreeList, dtorType>();
    case FreeListed:
        // Happens when a block transitions to fully allocated.
        ASSERT(sweepMode == SweepToFreeList);
        return FreeList();
    case Retired:
    case Allocated:
        RELEASE_ASSERT_NOT_REACHED();
        return FreeList();
    case Marked:
        return sweepMode == SweepToFreeList
            ? specializedSweep<Marked, SweepToFreeList, dtorType>()
            : specializedSweep<Marked, SweepOnly, dtorType>();
    }

    RELEASE_ASSERT_NOT_REACHED();
    return FreeList();
}

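// Used by stopAllocating() below: every cell in the block is first flagged as
// newly allocated, and then the bits for cells still on the free list are
// cleared, leaving exactly the live cells flagged.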
class SetNewlyAllocatedFunctor : public MarkedBlock::VoidFunctor {
public:
    SetNewlyAllocatedFunctor(MarkedBlock* block)
        : m_block(block)
    {
    }

    void operator()(JSCell* cell)
    {
        ASSERT(MarkedBlock::blockFor(cell) == m_block);
        m_block->setNewlyAllocated(cell);
    }

private:
    MarkedBlock* m_block;
};

void MarkedBlock::stopAllocating(const FreeList& freeList)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
    FreeCell* head = freeList.head;

    if (m_state == Marked) {
        // If the block is in the Marked state then we know that:
        // 1) It was not used for allocation during the previous allocation cycle.
        // 2) It may have dead objects, and we only know them to be dead by the
        //    fact that their mark bits are unset.
        // Hence if the block is Marked we need to leave it Marked.

        ASSERT(!head);
        return;
    }

    ASSERT(m_state == FreeListed);

    // Roll back to a coherent state for Heap introspection. Cells newly
    // allocated from our free list are not currently marked, so we need another
    // way to tell what's live vs dead.

    ASSERT(!m_newlyAllocated);
    m_newlyAllocated = adoptPtr(new WTF::Bitmap<atomsPerBlock>());

    SetNewlyAllocatedFunctor functor(this);
    forEachCell(functor);

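    // Cells still sitting on the free list were never handed out, so they are
    // not live: zap them so a later destructor sweep skips them, and clear
    // their newlyAllocated bits.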
    FreeCell* next;
    for (FreeCell* current = head; current; current = next) {
        next = current->next;
        reinterpret_cast<JSCell*>(current)->zap();
        clearNewlyAllocated(current);
    }

    m_state = Marked;
}

void MarkedBlock::clearMarks()
{
#if ENABLE(GGC)
    if (heap()->operationInProgress() == JSC::EdenCollection)
        this->clearMarksWithCollectionType<EdenCollection>();
    else
        this->clearMarksWithCollectionType<FullCollection>();
#else
    this->clearMarksWithCollectionType<FullCollection>();
#endif
}

void MarkedBlock::clearRememberedSet()
{
    m_rememberedSet.clearAll();
}

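// For a FullCollection every mark is stale, so the bitmap is wiped. For an
// EdenCollection the old mark bits are kept: objects that survived an earlier
// collection stay marked, and only newly allocated (eden) objects need to be
// visited again.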
template <HeapOperation collectionType>
void MarkedBlock::clearMarksWithCollectionType()
{
    ASSERT(collectionType == FullCollection || collectionType == EdenCollection);
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    ASSERT(m_state != New && m_state != FreeListed);
    if (collectionType == FullCollection) {
        m_marks.clearAll();
#if ENABLE(GGC)
        m_rememberedSet.clearAll();
#endif
        // This will become true at the end of the mark phase. We set it now to
        // avoid an extra pass to do so later.
        m_state = Marked;
        return;
    }

    ASSERT(collectionType == EdenCollection);
    // If a block was retired then there's no way an EdenCollection can un-retire it.
    if (m_state != Retired)
        m_state = Marked;
}

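// Last-chance path (e.g. heap teardown): discard all liveness information and
// then sweep, so destructors run for every remaining cell in the block.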
void MarkedBlock::lastChanceToFinalize()
{
    m_weakSet.lastChanceToFinalize();

    clearNewlyAllocated();
    clearMarksWithCollectionType<FullCollection>();
    sweep();
}

MarkedBlock::FreeList MarkedBlock::resumeAllocating()
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    ASSERT(m_state == Marked);

    if (!m_newlyAllocated) {
        // We didn't have to create a "newly allocated" bitmap. That means we were already Marked
        // when we last stopped allocation, so return an empty free list and stay in the Marked state.
        return FreeList();
    }

    // Re-create our free list from before stopping allocation.
    return sweep(SweepToFreeList);
}

void MarkedBlock::didRetireBlock(const FreeList& freeList)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
    FreeCell* head = freeList.head;

    // Currently we don't notify the Heap that we're giving up on this block.
    // The Heap might be able to make a better decision about how many bytes should
    // be allocated before the next collection if it knew about this retired block.
    // On the other hand we'll waste at most 10% of our Heap space between FullCollections
    // and only under heavy fragmentation.

    // We need to zap the free list when retiring a block so that we don't try to destroy
    // previously destroyed objects when we re-sweep the block in the future.
    FreeCell* next;
    for (FreeCell* current = head; current; current = next) {
        next = current->next;
        reinterpret_cast<JSCell*>(current)->zap();
    }

    ASSERT(m_state == FreeListed);
    m_state = Retired;
}

} // namespace JSC