/*
 * Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
 * Copyright (C) 2001 Peter Kelly (pmk@post.com)
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef MarkedBlock_h
#define MarkedBlock_h

#include "HeapOperation.h"
#include "IterationStatus.h"
#include "WeakSet.h"
#include <wtf/Bitmap.h>
#include <wtf/DataLog.h>
#include <wtf/DoublyLinkedList.h>
#include <wtf/HashFunctions.h>
#include <wtf/StdLibExtras.h>
#include <wtf/Vector.h>

// Set to log state transitions of blocks.
#define HEAP_LOG_BLOCK_STATE_TRANSITIONS 0

#if HEAP_LOG_BLOCK_STATE_TRANSITIONS
#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) do {                     \
        dataLogF(                                                       \
            "%s:%d %s: block %s = %p, %d\n",                            \
            __FILE__, __LINE__, __FUNCTION__,                           \
            #block, (block), (block)->m_state);                         \
    } while (false)
#else
#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) ((void)0)
#endif

namespace JSC {

class Heap;
class HeapRootVisitor;
class JSCell;
class MarkedAllocator;
class VM;

typedef uintptr_t Bits;

static const size_t MB = 1024 * 1024;

bool isZapped(const JSCell*);

// A marked block is a page-aligned container for heap-allocated objects.
// Objects are allocated within cells of the marked block. For a given
// marked block, all cells have the same size. Objects smaller than the
// cell size may be allocated in the marked block, in which case the
// allocation suffers from internal fragmentation: wasted space whose
// size is equal to the difference between the cell size and the object
// size.

class MarkedBlock : public DoublyLinkedListNode<MarkedBlock> {
    friend class WTF::DoublyLinkedListNode<MarkedBlock>;
    friend class LLIntOffsetsExtractor;
    friend struct VerifyMarkedOrRetired;
public:
    static const size_t atomSize = 16; // bytes
    static const size_t atomShiftAmount = 4; // log_2(atomSize)
    static const size_t blockSize = 16 * KB;
    static const size_t blockMask = ~(blockSize - 1); // blockSize must be a power of two.

    static const size_t atomsPerBlock = blockSize / atomSize;
    static const size_t atomMask = atomsPerBlock - 1;

    static const size_t markByteShiftAmount = 3; // log_2(word size for m_marks)
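
    // For illustration: with atomSize = 16 and blockSize = 16 * KB, a block
    // spans atomsPerBlock = 1024 atoms. A hypothetical 48-byte cell size
    // (3 atoms) holding a 40-byte object wastes 8 bytes per cell to the
    // internal fragmentation described in the class comment above.
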
    struct FreeCell {
        FreeCell* next;
    };

    struct FreeList {
        FreeCell* head;
        size_t bytes;

        FreeList();
        FreeList(FreeCell*, size_t);
    };

    struct VoidFunctor {
        typedef void ReturnType;
        void returnValue() { }
    };

    class CountFunctor {
    public:
        typedef size_t ReturnType;

        CountFunctor() : m_count(0) { }
        void count(size_t count) { m_count += count; }
        ReturnType returnValue() { return m_count; }

    private:
        ReturnType m_count;
    };
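
    // CountFunctor is a small reduction helper: derive from it, call count()
    // for each item visited, and read the accumulated total through
    // returnValue(). A concrete (illustrative) sketch appears after the
    // forEach* definitions below.
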
    static MarkedBlock* create(MarkedAllocator*, size_t capacity, size_t cellSize, bool needsDestruction);
    static void destroy(MarkedBlock*);

    static bool isAtomAligned(const void*);
    static MarkedBlock* blockFor(const void*);
    static size_t firstAtom();

    void lastChanceToFinalize();

    MarkedAllocator* allocator() const;
    Heap* heap() const;
    VM* vm() const;
    WeakSet& weakSet();

    enum SweepMode { SweepOnly, SweepToFreeList };
    FreeList sweep(SweepMode = SweepOnly);

    void shrink();

    void visitWeakSet(HeapRootVisitor&);
    void reapWeakSet();

    // While allocating from a free list, MarkedBlock temporarily has bogus
    // cell liveness data. To restore accurate cell liveness data, call one
    // of these functions (a usage sketch follows below):
    void didConsumeFreeList(); // Call this once you've allocated all the items in the free list.
    void stopAllocating(const FreeList&);
    FreeList resumeAllocating(); // Call this if you canonicalized a block for some non-collection related purpose.
    void didConsumeEmptyFreeList(); // Call this if you sweep a block, but the returned FreeList is empty.
    void didSweepToNoAvail(); // Call this if you sweep a block and get an empty free list back.
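
    // Allocation protocol sketch (illustrative only; the real call sites live
    // in MarkedAllocator and Heap, and "remainingList" is a hypothetical name):
    //
    //     MarkedBlock::FreeList freeList = block->sweep(MarkedBlock::SweepToFreeList);
    //     // ... bump-allocate cells out of freeList ...
    //     block->didConsumeFreeList();          // the whole free list was used up
    //     // or, if allocation stops early:
    //     block->stopAllocating(remainingList); // hand back the unused tail
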
    // Returns true if the "newly allocated" bitmap was non-null
    // and was successfully cleared; returns false otherwise.
    bool clearNewlyAllocated();

    template <HeapOperation collectionType>
    void clearMarksWithCollectionType();

    size_t markCount();

    bool isEmpty();

    size_t cellSize();
    bool needsDestruction() const;

    size_t size();
    size_t capacity();

    bool isMarked(const void*);
    bool testAndSetMarked(const void*);
    bool isLive(const JSCell*);
    bool isLiveCell(const void*);
    bool isMarkedOrNewlyAllocated(const JSCell*);
    void setMarked(const void*);
    void clearMarked(const void*);

    void setRemembered(const void*);
    void clearRemembered(const void*);
    void atomicClearRemembered(const void*);
    bool isRemembered(const void*);

    bool isNewlyAllocated(const void*);
    void setNewlyAllocated(const void*);
    void clearNewlyAllocated(const void*);

    bool isAllocated() const;
    bool needsSweeping();
    void didRetireBlock(const FreeList&);
    void willRemoveBlock();

    template <typename Functor> IterationStatus forEachCell(Functor&);
    template <typename Functor> IterationStatus forEachLiveCell(Functor&);
    template <typename Functor> IterationStatus forEachDeadCell(Functor&);

    static ptrdiff_t offsetOfMarks() { return OBJECT_OFFSETOF(MarkedBlock, m_marks); }

private:
    static const size_t atomAlignmentMask = atomSize - 1; // atomSize must be a power of two.

    enum BlockState { New, FreeListed, Allocated, Marked, Retired };
    template<bool callDestructors> FreeList sweepHelper(SweepMode = SweepOnly);
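
    // Informal state summary, inferred from the assertions and transitions
    // below: sweeping to a free list leaves a block FreeListed;
    // didConsumeFreeList() and didConsumeEmptyFreeList() move it out of
    // FreeListed; needsSweeping() holds only for Marked blocks,
    // isAllocated() only for Allocated blocks, and willRemoveBlock()
    // asserts the block is not Retired.
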
    typedef char Atom[atomSize];

    MarkedBlock(MarkedAllocator*, size_t capacity, size_t cellSize, bool needsDestruction);
    Atom* atoms();
    size_t atomNumber(const void*);
    void callDestructor(JSCell*);
    template<BlockState, SweepMode, bool callDestructors> FreeList specializedSweep();

    size_t m_atomsPerCell;
    size_t m_endAtom; // This is a fuzzy end. Always test for < m_endAtom.
#if ENABLE(PARALLEL_GC)
    WTF::Bitmap<atomsPerBlock, WTF::BitmapAtomic, uint8_t> m_marks;
#else
    WTF::Bitmap<atomsPerBlock, WTF::BitmapNotAtomic, uint8_t> m_marks;
#endif
    std::unique_ptr<WTF::Bitmap<atomsPerBlock>> m_newlyAllocated;

    size_t m_capacity;
    bool m_needsDestruction;
    MarkedAllocator* m_allocator;
    BlockState m_state;
    WeakSet m_weakSet;
};

inline MarkedBlock::FreeList::FreeList()
    : head(nullptr)
    , bytes(0)
{
}

inline MarkedBlock::FreeList::FreeList(FreeCell* head, size_t bytes)
    : head(head)
    , bytes(bytes)
{
}

inline size_t MarkedBlock::firstAtom()
{
    return WTF::roundUpToMultipleOf<atomSize>(sizeof(MarkedBlock)) / atomSize;
}

inline MarkedBlock::Atom* MarkedBlock::atoms()
{
    return reinterpret_cast<Atom*>(this);
}

inline bool MarkedBlock::isAtomAligned(const void* p)
{
    return !(reinterpret_cast<Bits>(p) & atomAlignmentMask);
}

inline MarkedBlock* MarkedBlock::blockFor(const void* p)
{
    return reinterpret_cast<MarkedBlock*>(reinterpret_cast<Bits>(p) & blockMask);
}

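// Worked example with hypothetical addresses: blockSize = 16 * KB, so
// blockMask clears the low 14 bits. For a pointer p = 0x20003010,
// blockFor(p) is the MarkedBlock at 0x20000000, and within that block
// atomNumber(p) is 0x3010 / atomSize = 0x301.
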
inline MarkedAllocator* MarkedBlock::allocator() const
{
    return m_allocator;
}

inline Heap* MarkedBlock::heap() const
{
    return m_weakSet.heap();
}

inline VM* MarkedBlock::vm() const
{
    return m_weakSet.vm();
}

inline WeakSet& MarkedBlock::weakSet()
{
    return m_weakSet;
}

inline void MarkedBlock::shrink()
{
    m_weakSet.shrink();
}

inline void MarkedBlock::visitWeakSet(HeapRootVisitor& heapRootVisitor)
{
    m_weakSet.visit(heapRootVisitor);
}

inline void MarkedBlock::reapWeakSet()
{
    m_weakSet.reap();
}

inline void MarkedBlock::willRemoveBlock()
{
    ASSERT(m_state != Retired);
}

inline void MarkedBlock::didConsumeFreeList()
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    ASSERT(m_state == FreeListed);
    m_state = Allocated;
}

inline void MarkedBlock::didConsumeEmptyFreeList()
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    ASSERT(!m_newlyAllocated);
    ASSERT(m_state == FreeListed);
    m_state = Marked;
}

inline size_t MarkedBlock::markCount()
{
    return m_marks.count();
}

inline bool MarkedBlock::isEmpty()
{
    return m_marks.isEmpty() && m_weakSet.isEmpty() && (!m_newlyAllocated || m_newlyAllocated->isEmpty());
}

inline size_t MarkedBlock::cellSize()
{
    return m_atomsPerCell * atomSize;
}

inline bool MarkedBlock::needsDestruction() const
{
    return m_needsDestruction;
}

inline size_t MarkedBlock::size()
{
    return markCount() * cellSize();
}

inline size_t MarkedBlock::capacity()
{
    return m_capacity;
}

inline size_t MarkedBlock::atomNumber(const void* p)
{
    return (reinterpret_cast<Bits>(p) - reinterpret_cast<Bits>(this)) / atomSize;
}

inline bool MarkedBlock::isMarked(const void* p)
{
    return m_marks.get(atomNumber(p));
}

inline bool MarkedBlock::testAndSetMarked(const void* p)
{
    return m_marks.concurrentTestAndSet(atomNumber(p));
}

inline void MarkedBlock::setMarked(const void* p)
{
    m_marks.set(atomNumber(p));
}

inline void MarkedBlock::clearMarked(const void* p)
{
    ASSERT(m_marks.get(atomNumber(p)));
    m_marks.clear(atomNumber(p));
}

inline bool MarkedBlock::isNewlyAllocated(const void* p)
{
    return m_newlyAllocated->get(atomNumber(p));
}

inline void MarkedBlock::setNewlyAllocated(const void* p)
{
    m_newlyAllocated->set(atomNumber(p));
}

inline void MarkedBlock::clearNewlyAllocated(const void* p)
{
    m_newlyAllocated->clear(atomNumber(p));
}

inline bool MarkedBlock::clearNewlyAllocated()
{
    if (m_newlyAllocated) {
        m_newlyAllocated = nullptr;
        return true;
    }
    return false;
}

inline bool MarkedBlock::isMarkedOrNewlyAllocated(const JSCell* cell)
{
    ASSERT(m_state == Retired || m_state == Marked);
    return m_marks.get(atomNumber(cell)) || (m_newlyAllocated && isNewlyAllocated(cell));
}

inline bool MarkedBlock::isLive(const JSCell* cell)
{
    switch (m_state) {
    case Allocated:
        return true;
    case Retired:
    case Marked:
        return isMarkedOrNewlyAllocated(cell);
    case New:
    case FreeListed:
        RELEASE_ASSERT_NOT_REACHED();
        return false;
    }

    RELEASE_ASSERT_NOT_REACHED();
    return false;
}

inline bool MarkedBlock::isLiveCell(const void* p)
{
    ASSERT(MarkedBlock::isAtomAligned(p));
    size_t atomNumber = this->atomNumber(p);
    size_t firstAtom = this->firstAtom();
    if (atomNumber < firstAtom) // Filters pointers into MarkedBlock metadata.
        return false;

    if ((atomNumber - firstAtom) % m_atomsPerCell) // Filters pointers into cell middles.
        return false;

    if (atomNumber >= m_endAtom) // Filters pointers into invalid cells out of the range.
        return false;

    return isLive(static_cast<const JSCell*>(p));
}

template <typename Functor> inline IterationStatus MarkedBlock::forEachCell(Functor& functor)
{
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
        JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
        if (functor(cell) == IterationStatus::Done)
            return IterationStatus::Done;
    }
    return IterationStatus::Continue;
}

template <typename Functor> inline IterationStatus MarkedBlock::forEachLiveCell(Functor& functor)
{
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
        JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
        if (!isLive(cell))
            continue;

        if (functor(cell) == IterationStatus::Done)
            return IterationStatus::Done;
    }
    return IterationStatus::Continue;
}

template <typename Functor> inline IterationStatus MarkedBlock::forEachDeadCell(Functor& functor)
{
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
        JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
        if (isLive(cell))
            continue;

        if (functor(cell) == IterationStatus::Done)
            return IterationStatus::Done;
    }
    return IterationStatus::Continue;
}

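// Iteration sketch (illustrative; CountLiveCells is a hypothetical functor):
//
//     struct CountLiveCells : MarkedBlock::CountFunctor {
//         IterationStatus operator()(JSCell*) { count(1); return IterationStatus::Continue; }
//     };
//     CountLiveCells functor;
//     block->forEachLiveCell(functor);
//     size_t liveCells = functor.returnValue();
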
inline bool MarkedBlock::needsSweeping()
{
    return m_state == Marked;
}

inline bool MarkedBlock::isAllocated() const
{
    return m_state == Allocated;
}

} // namespace JSC

namespace WTF {

struct MarkedBlockHash : PtrHash<JSC::MarkedBlock*> {
    static unsigned hash(JSC::MarkedBlock* const& key)
    {
        // Aligned VM regions tend to be monotonically increasing integers,
        // which is a great hash function, but we have to remove the low bits,
        // since they're always zero, which is a terrible hash function!
        return reinterpret_cast<JSC::Bits>(key) / JSC::MarkedBlock::blockSize;
    }
};

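// For example, with hypothetical block addresses 0x20000000, 0x20004000 and
// 0x20008000, dividing by blockSize yields the consecutive hashes 0x8000,
// 0x8001 and 0x8002, whereas the raw pointers would all hash to multiples
// of blockSize.
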
template<> struct DefaultHash<JSC::MarkedBlock*> {
    typedef MarkedBlockHash Hash;
};

} // namespace WTF

#endif // MarkedBlock_h