// heap/MarkedBlock.cpp (JavaScriptCore-1097.13)
/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "MarkedBlock.h"

#include "JSCell.h"
#include "JSObject.h"
#include "ScopeChain.h"

namespace JSC {

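// Allocation entry point for a new block: grab a page-aligned region of
// blockSize bytes and construct the MarkedBlock header in place at its base.
// A minimal usage sketch (the Heap* and the cell size below are illustrative
// assumptions, not values taken from this file):
//
//     MarkedBlock* block = MarkedBlock::create(heap, /* cellSize */ 64, /* cellsNeedDestruction */ true);
//     ...
//     MarkedBlock::destroy(block);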
MarkedBlock* MarkedBlock::create(Heap* heap, size_t cellSize, bool cellsNeedDestruction)
{
    PageAllocationAligned allocation = PageAllocationAligned::allocate(blockSize, blockSize, OSAllocator::JSGCHeapPages);
    if (!static_cast<bool>(allocation))
        CRASH();
    return new (NotNull, allocation.base()) MarkedBlock(allocation, heap, cellSize, cellsNeedDestruction);
}

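// Reuses an existing block, possibly for a different cell size: the underlying
// page allocation is kept and the MarkedBlock header is reconstructed in place
// with placement new.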
MarkedBlock* MarkedBlock::recycle(MarkedBlock* block, Heap* heap, size_t cellSize, bool cellsNeedDestruction)
{
    return new (NotNull, block) MarkedBlock(block->m_allocation, heap, cellSize, cellsNeedDestruction);
}

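// Releases the page allocation backing the block.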
void MarkedBlock::destroy(MarkedBlock* block)
{
    block->m_allocation.deallocate();
}

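// m_atomsPerCell rounds the requested cell size up to a whole number of atoms
// (ceiling division), and m_endAtom is one past the last atom index at which a
// full cell still fits inside the block.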
MarkedBlock::MarkedBlock(PageAllocationAligned& allocation, Heap* heap, size_t cellSize, bool cellsNeedDestruction)
    : HeapBlock(allocation)
    , m_atomsPerCell((cellSize + atomSize - 1) / atomSize)
    , m_endAtom(atomsPerBlock - m_atomsPerCell + 1)
    , m_cellsNeedDestruction(cellsNeedDestruction)
    , m_state(New) // All cells start out unmarked.
    , m_heap(heap)
{
    ASSERT(heap);
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
}

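// Runs the cell's destructor at most once: cells that are already zapped are
// skipped, and the cell is zapped afterwards so a later sweep will not destruct
// it again.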
inline void MarkedBlock::callDestructor(JSCell* cell)
{
    // A previous eager sweep may already have run cell's destructor.
    if (cell->isZapped())
        return;

#if ENABLE(SIMPLE_HEAP_PROFILING)
    m_heap->m_destroyedTypeCounts.countVPtr(vptr);
#endif
    cell->methodTable()->destroy(cell);

    cell->zap();
}

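// The block state, sweep mode, and destructor requirement are compile-time
// template parameters, so each instantiation of the sweep loop below is
// specialized for one combination and the per-cell branches on those values
// can be folded away by the compiler.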
template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode, bool destructorCallNeeded>
MarkedBlock::FreeList MarkedBlock::specializedSweep()
{
    ASSERT(blockState != Allocated && blockState != FreeListed);
    ASSERT(destructorCallNeeded || sweepMode != SweepOnly);

    // This produces a free list that is ordered in reverse through the block.
    // This is fine, since the allocation code makes no assumptions about the
    // order of the free list.
    FreeCell* head = 0;
    size_t count = 0;
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
        if (blockState == Marked && m_marks.get(i))
            continue;

        JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
        if (blockState == Zapped && !cell->isZapped())
            continue;

        if (destructorCallNeeded && blockState != New)
            callDestructor(cell);

        if (sweepMode == SweepToFreeList) {
            FreeCell* freeCell = reinterpret_cast<FreeCell*>(cell);
            freeCell->next = head;
            head = freeCell;
            ++count;
        }
    }

    m_state = ((sweepMode == SweepToFreeList) ? FreeListed : Zapped);
    return FreeList(head, count * cellSize());
}

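// Sweeps the block in one of two modes: SweepToFreeList links the dead cells
// into a free list for the allocator, while SweepOnly merely runs destructors,
// which is why it is skipped entirely when the cells need no destruction.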
MarkedBlock::FreeList MarkedBlock::sweep(SweepMode sweepMode)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    if (sweepMode == SweepOnly && !m_cellsNeedDestruction)
        return FreeList();

    if (m_cellsNeedDestruction)
        return sweepHelper<true>(sweepMode);
    return sweepHelper<false>(sweepMode);
}

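// Dispatches on the block's current state to the matching specializedSweep
// instantiation.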
template<bool destructorCallNeeded>
MarkedBlock::FreeList MarkedBlock::sweepHelper(SweepMode sweepMode)
{
    switch (m_state) {
    case New:
        ASSERT(sweepMode == SweepToFreeList);
        return specializedSweep<New, SweepToFreeList, destructorCallNeeded>();
    case FreeListed:
        // Happens when a block transitions to fully allocated.
        ASSERT(sweepMode == SweepToFreeList);
        return FreeList();
    case Allocated:
        ASSERT_NOT_REACHED();
        return FreeList();
    case Marked:
        return sweepMode == SweepToFreeList
            ? specializedSweep<Marked, SweepToFreeList, destructorCallNeeded>()
            : specializedSweep<Marked, SweepOnly, destructorCallNeeded>();
    case Zapped:
        return sweepMode == SweepToFreeList
            ? specializedSweep<Zapped, SweepToFreeList, destructorCallNeeded>()
            : specializedSweep<Zapped, SweepOnly, destructorCallNeeded>();
    }

    ASSERT_NOT_REACHED();
    return FreeList();
}

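// Given the free list previously produced for this block, zaps the cells still
// on it so that Heap introspection can tell live cells from dead ones even
// though cells newly allocated from that list are not yet marked; Marked and
// Zapped blocks are already coherent and are left untouched.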
void MarkedBlock::zapFreeList(const FreeList& freeList)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
    FreeCell* head = freeList.head;

    if (m_state == Marked) {
        // If the block is in the Marked state then we know that:
        // 1) It was not used for allocation during the previous allocation cycle.
        // 2) It may have dead objects, and we only know them to be dead by the
        //    fact that their mark bits are unset.
        // Hence if the block is Marked we need to leave it Marked.

        ASSERT(!head);

        return;
    }

    if (m_state == Zapped) {
        // If the block is in the Zapped state then we know that someone already
        // zapped it for us. This could not have happened during a GC, but might
        // be the result of someone having done a GC scan to perform some operation
        // over all live objects (or all live blocks). It also means that somebody
        // had allocated in this block since the last GC, swept all dead objects
        // onto the free list, left the block in the FreeListed state, then the heap
        // scan happened, and canonicalized the block, leading to all dead objects
        // being zapped. Therefore, it is safe for us to simply do nothing, since
        // dead objects will have 0 in their vtables and live objects will have
        // non-zero vtables, which is consistent with the block being zapped.

        ASSERT(!head);

        return;
    }

    ASSERT(m_state == FreeListed);

    // Roll back to a coherent state for Heap introspection. Cells newly
    // allocated from our free list are not currently marked, so we need another
    // way to tell what's live vs dead. We use zapping for that.

    FreeCell* next;
    for (FreeCell* current = head; current; current = next) {
        next = current->next;
        reinterpret_cast<JSCell*>(current)->zap();
    }

    m_state = Zapped;
}

} // namespace JSC