heap/MarkedBlock.h (JavaScriptCore-1097.3.3)
/*
 *  Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
 *  Copyright (C) 2001 Peter Kelly (pmk@post.com)
 *  Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#ifndef MarkedBlock_h
#define MarkedBlock_h

#include "CardSet.h"
#include "HeapBlock.h"

#include <wtf/Bitmap.h>
#include <wtf/DataLog.h>
#include <wtf/DoublyLinkedList.h>
#include <wtf/HashFunctions.h>
#include <wtf/PageAllocationAligned.h>
#include <wtf/StdLibExtras.h>
#include <wtf/Vector.h>

// Set to log state transitions of blocks.
#define HEAP_LOG_BLOCK_STATE_TRANSITIONS 0

#if HEAP_LOG_BLOCK_STATE_TRANSITIONS
#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) do {                    \
        dataLog(                                                       \
            "%s:%d %s: block %s = %p, %d\n",                           \
            __FILE__, __LINE__, __FUNCTION__,                          \
            #block, (block), (block)->m_state);                        \
    } while (false)
#else
#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) ((void)0)
#endif

namespace JSC {

    class Heap;
    class JSCell;

    typedef uintptr_t Bits;

    static const size_t MB = 1024 * 1024;

    bool isZapped(const JSCell*);

    // A marked block is a page-aligned container for heap-allocated objects.
    // Objects are allocated within cells of the marked block. For a given
    // marked block, all cells have the same size. Objects smaller than the
    // cell size may be allocated in the marked block, in which case the
    // allocation suffers from internal fragmentation: wasted space whose
    // size is equal to the difference between the cell size and the object
    // size.
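    //
    // A worked illustration of that fragmentation cost (numbers invented for
    // the example, not taken from this header): in a block whose cells are
    // 64 bytes, a 40-byte object still occupies a whole 64-byte cell, wasting
    // 64 - 40 = 24 bytes; a 64-byte object wastes nothing.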

    class MarkedBlock : public HeapBlock {
        friend class WTF::DoublyLinkedListNode<MarkedBlock>;
    public:
        // Ensure natural alignment for native types whilst recognizing that the smallest
        // object the heap will commonly allocate is four words.
        static const size_t atomSize = 4 * sizeof(void*);
        static const size_t atomShift = 5;
        static const size_t blockSize = 64 * KB;
        static const size_t blockMask = ~(blockSize - 1); // blockSize must be a power of two.

        static const size_t atomsPerBlock = blockSize / atomSize; // ~0.4% overhead
        static const size_t atomMask = atomsPerBlock - 1;
        static const int cardShift = 8; // This is log2 of bytes per card.
        static const size_t bytesPerCard = 1 << cardShift;
        static const int cardCount = blockSize / bytesPerCard;
        static const int cardMask = cardCount - 1;
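
        // Worked arithmetic for these constants on a 64-bit build (an
        // illustrative sketch, not an assertion from the original header):
        // atomSize is 4 * sizeof(void*) = 32 bytes, which matches
        // atomShift = 5 since 1 << 5 == 32; a 64KB block then holds
        // atomsPerBlock = 65536 / 32 = 2048 atoms, so the mark bitmap is
        // 2048 bits = 256 bytes per block, presumably the "~0.4% overhead"
        // noted above (256 / 65536 is about 0.39%). For cards,
        // bytesPerCard = 1 << 8 = 256, giving cardCount = 65536 / 256 = 256.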

        struct FreeCell {
            FreeCell* next;
        };

        struct FreeList {
            FreeCell* head;
            size_t bytes;

            FreeList();
            FreeList(FreeCell*, size_t);
        };

        struct VoidFunctor {
            typedef void ReturnType;
            void returnValue() { }
        };

        static MarkedBlock* create(Heap*, size_t cellSize, bool cellsNeedDestruction);
        static MarkedBlock* recycle(MarkedBlock*, Heap*, size_t cellSize, bool cellsNeedDestruction);
        static void destroy(MarkedBlock*);

        static bool isAtomAligned(const void*);
        static MarkedBlock* blockFor(const void*);
        static size_t firstAtom();

        Heap* heap() const;

        void* allocate();

        enum SweepMode { SweepOnly, SweepToFreeList };
        FreeList sweep(SweepMode = SweepOnly);

        // While allocating from a free list, MarkedBlock temporarily has bogus
        // cell liveness data. To restore accurate cell liveness data, call one
        // of these functions:
        void didConsumeFreeList(); // Call this once you've allocated all the items in the free list.
        void zapFreeList(const FreeList&); // Call this to undo the free list.
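        //
        // A typical allocation cycle looks roughly like this (an illustrative
        // sketch only; the real driver lives elsewhere in the heap code):
        //
        //     MarkedBlock::FreeList freeList = block->sweep(MarkedBlock::SweepToFreeList);
        //     // ... pop cells off freeList.head, following FreeCell::next ...
        //     block->didConsumeFreeList(); // liveness data is accurate again
        //
        // If allocation stops before the free list is exhausted, hand the
        // remaining list back via zapFreeList() instead.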

        void clearMarks();
        size_t markCount();
        bool markCountIsZero(); // Faster than markCount().

        size_t cellSize();
        bool cellsNeedDestruction();

        size_t size();
        size_t capacity();

        bool isMarked(const void*);
        bool testAndSetMarked(const void*);
        bool isLive(const JSCell*);
        bool isLiveCell(const void*);
        void setMarked(const void*);

#if ENABLE(GGC)
        void setDirtyObject(const void* atom)
        {
            ASSERT(MarkedBlock::blockFor(atom) == this);
            m_cards.markCardForAtom(atom);
        }

        uint8_t* addressOfCardFor(const void* atom)
        {
            ASSERT(MarkedBlock::blockFor(atom) == this);
            return &m_cards.cardForAtom(atom);
        }

        static inline size_t offsetOfCards()
        {
            return OBJECT_OFFSETOF(MarkedBlock, m_cards);
        }

        static inline size_t offsetOfMarks()
        {
            return OBJECT_OFFSETOF(MarkedBlock, m_marks);
        }

        typedef Vector<JSCell*, 32> DirtyCellVector;
        inline void gatherDirtyCells(DirtyCellVector&);
        template <int size> inline void gatherDirtyCellsWithSize(DirtyCellVector&);
#endif

        template <typename Functor> void forEachCell(Functor&);

    private:
        static const size_t atomAlignmentMask = atomSize - 1; // atomSize must be a power of two.

        enum BlockState { New, FreeListed, Allocated, Marked, Zapped };
        template<bool destructorCallNeeded> FreeList sweepHelper(SweepMode = SweepOnly);

        typedef char Atom[atomSize];

        MarkedBlock(PageAllocationAligned&, Heap*, size_t cellSize, bool cellsNeedDestruction);
        Atom* atoms();
        size_t atomNumber(const void*);
        void callDestructor(JSCell*);
        template<BlockState, SweepMode, bool destructorCallNeeded> FreeList specializedSweep();

#if ENABLE(GGC)
        CardSet<bytesPerCard, blockSize> m_cards;
#endif

        size_t m_atomsPerCell;
        size_t m_endAtom; // This is a fuzzy end. Always test for < m_endAtom.
#if ENABLE(PARALLEL_GC)
        WTF::Bitmap<atomsPerBlock, WTF::BitmapAtomic> m_marks;
#else
        WTF::Bitmap<atomsPerBlock, WTF::BitmapNotAtomic> m_marks;
#endif
        bool m_cellsNeedDestruction;
        BlockState m_state;
        Heap* m_heap;
    };

    inline MarkedBlock::FreeList::FreeList()
        : head(0)
        , bytes(0)
    {
    }

    inline MarkedBlock::FreeList::FreeList(FreeCell* head, size_t bytes)
        : head(head)
        , bytes(bytes)
    {
    }

    inline size_t MarkedBlock::firstAtom()
    {
        return WTF::roundUpToMultipleOf<atomSize>(sizeof(MarkedBlock)) / atomSize;
    }

    inline MarkedBlock::Atom* MarkedBlock::atoms()
    {
        return reinterpret_cast<Atom*>(this);
    }

    inline bool MarkedBlock::isAtomAligned(const void* p)
    {
        return !(reinterpret_cast<Bits>(p) & atomAlignmentMask);
    }

    inline MarkedBlock* MarkedBlock::blockFor(const void* p)
    {
        return reinterpret_cast<MarkedBlock*>(reinterpret_cast<Bits>(p) & blockMask);
    }
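
    // blockFor() relies on every MarkedBlock being a blockSize-aligned region
    // of blockSize bytes, so masking with blockMask recovers the owning block
    // from any interior pointer. Illustrative example (addresses invented for
    // the sketch): a cell at 0x7f3a20018040 maps to the block header at
    // 0x7f3a20018040 & ~0xffff = 0x7f3a20010000.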

    inline Heap* MarkedBlock::heap() const
    {
        return m_heap;
    }

    inline void MarkedBlock::didConsumeFreeList()
    {
        HEAP_LOG_BLOCK_STATE_TRANSITION(this);

        ASSERT(m_state == FreeListed);
        m_state = Allocated;
    }

    inline void MarkedBlock::clearMarks()
    {
        HEAP_LOG_BLOCK_STATE_TRANSITION(this);

        ASSERT(m_state != New && m_state != FreeListed);
        m_marks.clearAll();

        // This will become true at the end of the mark phase. We set it now to
        // avoid an extra pass to do so later.
        m_state = Marked;
    }

    inline size_t MarkedBlock::markCount()
    {
        return m_marks.count();
    }

    inline bool MarkedBlock::markCountIsZero()
    {
        return m_marks.isEmpty();
    }

    inline size_t MarkedBlock::cellSize()
    {
        return m_atomsPerCell * atomSize;
    }

    inline bool MarkedBlock::cellsNeedDestruction()
    {
        return m_cellsNeedDestruction;
    }

    inline size_t MarkedBlock::size()
    {
        return markCount() * cellSize();
    }

    inline size_t MarkedBlock::capacity()
    {
        return m_allocation.size();
    }

    inline size_t MarkedBlock::atomNumber(const void* p)
    {
        return (reinterpret_cast<Bits>(p) - reinterpret_cast<Bits>(this)) / atomSize;
    }

    inline bool MarkedBlock::isMarked(const void* p)
    {
        return m_marks.get(atomNumber(p));
    }

    inline bool MarkedBlock::testAndSetMarked(const void* p)
    {
        return m_marks.concurrentTestAndSet(atomNumber(p));
    }

    inline void MarkedBlock::setMarked(const void* p)
    {
        m_marks.set(atomNumber(p));
    }

    inline bool MarkedBlock::isLive(const JSCell* cell)
    {
        switch (m_state) {
        case Allocated:
            return true;
        case Zapped:
            if (isZapped(cell)) {
                // Object dead in previous collection, not allocated since previous collection: mark bit should not be set.
                ASSERT(!m_marks.get(atomNumber(cell)));
                return false;
            }

            // Newly allocated objects: mark bit not set.
            // Objects that survived prior collection: mark bit set.
            return true;
        case Marked:
            return m_marks.get(atomNumber(cell));

        case New:
        case FreeListed:
            ASSERT_NOT_REACHED();
            return false;
        }

        ASSERT_NOT_REACHED();
        return false;
    }

    inline bool MarkedBlock::isLiveCell(const void* p)
    {
        ASSERT(MarkedBlock::isAtomAligned(p));
        size_t atomNumber = this->atomNumber(p);
        size_t firstAtom = this->firstAtom();
        if (atomNumber < firstAtom) // Filters pointers into MarkedBlock metadata.
            return false;
        if ((atomNumber - firstAtom) % m_atomsPerCell) // Filters pointers into cell middles.
            return false;
        if (atomNumber >= m_endAtom) // Filters pointers past the last allocatable cell in the block.
            return false;

        return isLive(static_cast<const JSCell*>(p));
    }

    template <typename Functor> inline void MarkedBlock::forEachCell(Functor& functor)
    {
        for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
            JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
            if (!isLive(cell))
                continue;

            functor(cell);
        }
    }
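
    // Usage sketch for forEachCell(). The functor below is hypothetical and
    // not part of this header; real callers live elsewhere in the heap code.
    // Any type with operator()(JSCell*) will do:
    //
    //     struct CountLiveCells {
    //         size_t count;
    //         CountLiveCells() : count(0) { }
    //         void operator()(JSCell*) { ++count; }
    //     };
    //
    //     CountLiveCells counter;
    //     block->forEachCell(counter);
    //     // counter.count now holds the number of live cells in the block.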

#if ENABLE(GGC)
    template <int _cellSize> void MarkedBlock::gatherDirtyCellsWithSize(DirtyCellVector& dirtyCells)
    {
        if (m_cards.testAndClear(0)) {
            char* ptr = reinterpret_cast<char*>(&atoms()[firstAtom()]);
            const char* end = reinterpret_cast<char*>(this) + bytesPerCard;
            while (ptr < end) {
                JSCell* cell = reinterpret_cast<JSCell*>(ptr);
                if (isMarked(cell))
                    dirtyCells.append(cell);
                ptr += _cellSize;
            }
        }

        const size_t cellOffset = firstAtom() * atomSize % _cellSize;
        for (size_t i = 1; i < m_cards.cardCount; i++) {
            if (!m_cards.testAndClear(i))
                continue;
            char* ptr = reinterpret_cast<char*>(this) + i * bytesPerCard + cellOffset;
            char* end = reinterpret_cast<char*>(this) + (i + 1) * bytesPerCard;

            while (ptr < end) {
                JSCell* cell = reinterpret_cast<JSCell*>(ptr);
                if (isMarked(cell))
                    dirtyCells.append(cell);
                ptr += _cellSize;
            }
        }
    }

    void MarkedBlock::gatherDirtyCells(DirtyCellVector& dirtyCells)
    {
        COMPILE_ASSERT((int)m_cards.cardCount == (int)cardCount, MarkedBlockCardCountsMatch);

        ASSERT(m_state != New && m_state != FreeListed);

        // This is an optimisation to avoid having to walk the set of marked
        // blocks twice during GC.
        m_state = Marked;

        if (markCountIsZero())
            return;

        size_t cellSize = this->cellSize();
        if (cellSize == 32) {
            gatherDirtyCellsWithSize<32>(dirtyCells);
            return;
        }
        if (cellSize == 64) {
            gatherDirtyCellsWithSize<64>(dirtyCells);
            return;
        }

        const size_t firstCellOffset = firstAtom() * atomSize % cellSize;

        if (m_cards.testAndClear(0)) {
            char* ptr = reinterpret_cast<char*>(this) + firstAtom() * atomSize;
            char* end = reinterpret_cast<char*>(this) + bytesPerCard;
            while (ptr < end) {
                JSCell* cell = reinterpret_cast<JSCell*>(ptr);
                if (isMarked(cell))
                    dirtyCells.append(cell);
                ptr += cellSize;
            }
        }
        for (size_t i = 1; i < m_cards.cardCount; i++) {
            if (!m_cards.testAndClear(i))
                continue;
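            // The next line starts the scan at the first cell boundary at or
            // after the start of card i: cells need not be card-aligned, so the
            // card's start offset is rounded up to the next cell boundary,
            // measured from firstCellOffset. Worked example with illustrative
            // numbers: if cellSize = 48, firstCellOffset = 32 and i = 1 (the
            // card starts at byte 256), cells begin at offsets 32, 80, ...,
            // 224, 272, so the scan starts at byte 272 = 32 + 48 * ((256 + 47 - 32) / 48).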
            char* ptr = reinterpret_cast<char*>(this) + firstCellOffset + cellSize * ((i * bytesPerCard + cellSize - 1 - firstCellOffset) / cellSize);
            char* end = reinterpret_cast<char*>(this) + std::min((i + 1) * bytesPerCard, m_endAtom * atomSize);

            while (ptr < end) {
                JSCell* cell = reinterpret_cast<JSCell*>(ptr);
                if (isMarked(cell))
                    dirtyCells.append(cell);
                ptr += cellSize;
            }
        }
    }
#endif

} // namespace JSC

namespace WTF {

    struct MarkedBlockHash : PtrHash<JSC::MarkedBlock*> {
        static unsigned hash(JSC::MarkedBlock* const& key)
        {
            // Aligned VM regions tend to be monotonically increasing integers,
            // which is a great hash function, but we have to remove the low bits,
            // since they're always zero, which is a terrible hash function!
            return reinterpret_cast<JSC::Bits>(key) / JSC::MarkedBlock::blockSize;
        }
    };
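
    // Illustrative example (addresses invented for the sketch): consecutive
    // 64KB blocks at 0x10000000, 0x10010000 and 0x10020000 hash to 0x1000,
    // 0x1001 and 0x1002 after dividing by blockSize, so neighbouring blocks
    // land in distinct buckets instead of all sharing their zero low bits.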

    template<> struct DefaultHash<JSC::MarkedBlock*> {
        typedef MarkedBlockHash Hash;
    };

} // namespace WTF

#endif // MarkedBlock_h