#include "MarkedAllocator.h"
#include "MarkedBlock.h"
#include "MarkedBlockSet.h"
-#include <wtf/PageAllocationAligned.h>
+#include <array>
#include <wtf/Bitmap.h>
#include <wtf/DoublyLinkedList.h>
-#include <wtf/FixedArray.h>
#include <wtf/HashSet.h>
#include <wtf/Noncopyable.h>
+#include <wtf/RetainPtr.h>
#include <wtf/Vector.h>
namespace JSC {
class Heap;
+class HeapIterationScope;
class JSCell;
class LiveObjectIterator;
class LLIntOffsetsExtractor;
class SlotVisitor;
struct ClearMarks : MarkedBlock::VoidFunctor {
- void operator()(MarkedBlock* block) { block->clearMarks(); }
+ void operator()(MarkedBlock* block)
+ {
+ block->clearMarks();
+ }
};
struct Sweep : MarkedBlock::VoidFunctor {
void operator()(MarkedBlock* block) { block->sweep(); }
};
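+// Sweeps a block only if it still needs sweeping, so zombifySweep() can
+// bring every block to a swept state without redundant work.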
+struct ZombifySweep : MarkedBlock::VoidFunctor {
+ void operator()(MarkedBlock* block)
+ {
+ if (block->needsSweeping())
+ block->sweep();
+ }
+};
+
struct MarkCount : MarkedBlock::CountFunctor {
void operator()(MarkedBlock* block) { count(block->markCount()); }
};
struct Size : MarkedBlock::CountFunctor {
    void operator()(MarkedBlock* block) { count(block->markCount() * block->cellSize()); }
};
-struct Capacity : MarkedBlock::CountFunctor {
- void operator()(MarkedBlock* block) { count(block->capacity()); }
-};
-
class MarkedSpace {
WTF_MAKE_NONCOPYABLE(MarkedSpace);
public:
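+    // Allocation requests are rounded up to a size class: "precise" classes
+    // advance atomSize at a time up to preciseCutoff, "imprecise" classes
+    // advance in larger steps up to impreciseCutoff, and anything bigger
+    // falls through to the large allocator (see allocatorFor below).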
+ // [ 32... 128 ]
+ static const size_t preciseStep = MarkedBlock::atomSize;
+ static const size_t preciseCutoff = 128;
+ static const size_t preciseCount = preciseCutoff / preciseStep;
+
+ // [ 1024... blockSize ]
+ static const size_t impreciseStep = 2 * preciseCutoff;
+ static const size_t impreciseCutoff = MarkedBlock::blockSize / 2;
+ static const size_t impreciseCount = impreciseCutoff / impreciseStep;
+
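+    // A Subspace bundles the full ladder of size-class allocators plus a
+    // large-object allocator; MarkedSpace keeps one Subspace for objects
+    // with destructors and one for objects without.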
+ struct Subspace {
+ std::array<MarkedAllocator, preciseCount> preciseAllocators;
+ std::array<MarkedAllocator, impreciseCount> impreciseAllocators;
+ MarkedAllocator largeAllocator;
+ };
+
MarkedSpace(Heap*);
~MarkedSpace();
void lastChanceToFinalize();
MarkedAllocator& firstAllocator();
MarkedAllocator& allocatorFor(size_t);
- MarkedAllocator& immortalStructureDestructorAllocatorFor(size_t);
- MarkedAllocator& normalDestructorAllocatorFor(size_t);
- void* allocateWithNormalDestructor(size_t);
- void* allocateWithImmortalStructureDestructor(size_t);
+ MarkedAllocator& destructorAllocatorFor(size_t);
+ void* allocateWithDestructor(size_t);
void* allocateWithoutDestructor(size_t);
-
+
+ Subspace& subspaceForObjectsWithDestructor() { return m_destructorSpace; }
+ Subspace& subspaceForObjectsWithoutDestructor() { return m_normalSpace; }
+
void resetAllocators();
void visitWeakSets(HeapRootVisitor&);
void reapWeakSets();
MarkedBlockSet& blocks() { return m_blocks; }
-
- void canonicalizeCellLivenessData();
+
+ void willStartIterating();
+ bool isIterating() { return m_isIterating; }
+ void didFinishIterating();
+
+ void stopAllocating();
+ void resumeAllocating(); // If we just stopped allocation but we didn't do a collection, we need to resume allocation.
typedef HashSet<MarkedBlock*>::iterator BlockIterator;
- template<typename Functor> typename Functor::ReturnType forEachLiveCell(Functor&);
- template<typename Functor> typename Functor::ReturnType forEachLiveCell();
- template<typename Functor> typename Functor::ReturnType forEachDeadCell(Functor&);
- template<typename Functor> typename Functor::ReturnType forEachDeadCell();
+ template<typename Functor> typename Functor::ReturnType forEachLiveCell(HeapIterationScope&, Functor&);
+ template<typename Functor> typename Functor::ReturnType forEachLiveCell(HeapIterationScope&);
+ template<typename Functor> typename Functor::ReturnType forEachDeadCell(HeapIterationScope&, Functor&);
+ template<typename Functor> typename Functor::ReturnType forEachDeadCell(HeapIterationScope&);
template<typename Functor> typename Functor::ReturnType forEachBlock(Functor&);
template<typename Functor> typename Functor::ReturnType forEachBlock();
void didAddBlock(MarkedBlock*);
void didConsumeFreeList(MarkedBlock*);
+ void didAllocateInBlock(MarkedBlock*);
void clearMarks();
+ void clearNewlyAllocated();
void sweep();
+ void zombifySweep();
size_t objectCount();
size_t size();
size_t capacity();
bool isPagedOut(double deadline);
-private:
- friend class LLIntOffsetsExtractor;
+#if USE(CF)
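+    // Defers releasing a retained CF/ObjC object until a later, safer time,
+    // keeping release work off the collector's critical path.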
+ template<typename T> void releaseSoon(RetainPtr<T>&&);
+#endif
- // [ 32... 128 ]
- static const size_t preciseStep = MarkedBlock::atomSize;
- static const size_t preciseCutoff = 128;
- static const size_t preciseCount = preciseCutoff / preciseStep;
+ const Vector<MarkedBlock*>& blocksWithNewObjects() const { return m_blocksWithNewObjects; }
- // [ 1024... blockSize ]
- static const size_t impreciseStep = 2 * preciseCutoff;
- static const size_t impreciseCutoff = MarkedBlock::blockSize / 2;
- static const size_t impreciseCount = impreciseCutoff / impreciseStep;
+private:
+ friend class LLIntOffsetsExtractor;
+ friend class JIT;
- struct Subspace {
- FixedArray<MarkedAllocator, preciseCount> preciseAllocators;
- FixedArray<MarkedAllocator, impreciseCount> impreciseAllocators;
- MarkedAllocator largeAllocator;
- };
+ template<typename Functor> void forEachAllocator(Functor&);
+ template<typename Functor> void forEachAllocator();
- Subspace m_normalDestructorSpace;
- Subspace m_immortalStructureDestructorSpace;
+ Subspace m_destructorSpace;
Subspace m_normalSpace;
Heap* m_heap;
+ size_t m_capacity;
+ bool m_isIterating;
MarkedBlockSet m_blocks;
+ Vector<MarkedBlock*> m_blocksWithNewObjects;
};
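+// The unnamed HeapIterationScope& parameters below exist to force callers to
+// hold an iteration scope (which stops allocation) while cells are walked;
+// isIterating() asserts that such a scope is active.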
-template<typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachLiveCell(Functor& functor)
+template<typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachLiveCell(HeapIterationScope&, Functor& functor)
{
- canonicalizeCellLivenessData();
-
+ ASSERT(isIterating());
BlockIterator end = m_blocks.set().end();
- for (BlockIterator it = m_blocks.set().begin(); it != end; ++it)
- (*it)->forEachLiveCell(functor);
+ for (BlockIterator it = m_blocks.set().begin(); it != end; ++it) {
+ if ((*it)->forEachLiveCell(functor) == IterationStatus::Done)
+ break;
+ }
return functor.returnValue();
}
-template<typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachLiveCell()
+template<typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachLiveCell(HeapIterationScope& scope)
{
Functor functor;
- return forEachLiveCell(functor);
+ return forEachLiveCell(scope, functor);
}
-template<typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachDeadCell(Functor& functor)
+template<typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachDeadCell(HeapIterationScope&, Functor& functor)
{
- canonicalizeCellLivenessData();
-
+ ASSERT(isIterating());
BlockIterator end = m_blocks.set().end();
- for (BlockIterator it = m_blocks.set().begin(); it != end; ++it)
- (*it)->forEachDeadCell(functor);
+ for (BlockIterator it = m_blocks.set().begin(); it != end; ++it) {
+ if ((*it)->forEachDeadCell(functor) == IterationStatus::Done)
+ break;
+ }
return functor.returnValue();
}
-template<typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachDeadCell()
+template<typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachDeadCell(HeapIterationScope& scope)
{
Functor functor;
- return forEachDeadCell(functor);
+ return forEachDeadCell(scope, functor);
}
inline MarkedAllocator& MarkedSpace::allocatorFor(size_t bytes)
{
    ASSERT(bytes);
    if (bytes <= preciseCutoff)
        return m_normalSpace.preciseAllocators[(bytes - 1) / preciseStep];
    if (bytes <= impreciseCutoff)
        return m_normalSpace.impreciseAllocators[(bytes - 1) / impreciseStep];
    return m_normalSpace.largeAllocator;
}
-inline MarkedAllocator& MarkedSpace::immortalStructureDestructorAllocatorFor(size_t bytes)
+inline MarkedAllocator& MarkedSpace::destructorAllocatorFor(size_t bytes)
{
ASSERT(bytes);
if (bytes <= preciseCutoff)
- return m_immortalStructureDestructorSpace.preciseAllocators[(bytes - 1) / preciseStep];
+ return m_destructorSpace.preciseAllocators[(bytes - 1) / preciseStep];
if (bytes <= impreciseCutoff)
- return m_immortalStructureDestructorSpace.impreciseAllocators[(bytes - 1) / impreciseStep];
- return m_immortalStructureDestructorSpace.largeAllocator;
-}
-
-inline MarkedAllocator& MarkedSpace::normalDestructorAllocatorFor(size_t bytes)
-{
- ASSERT(bytes);
- if (bytes <= preciseCutoff)
- return m_normalDestructorSpace.preciseAllocators[(bytes - 1) / preciseStep];
- if (bytes <= impreciseCutoff)
- return m_normalDestructorSpace.impreciseAllocators[(bytes - 1) / impreciseStep];
- return m_normalDestructorSpace.largeAllocator;
+ return m_destructorSpace.impreciseAllocators[(bytes - 1) / impreciseStep];
+ return m_destructorSpace.largeAllocator;
}
inline void* MarkedSpace::allocateWithoutDestructor(size_t bytes)
{
    return allocatorFor(bytes).allocate(bytes);
}
-inline void* MarkedSpace::allocateWithImmortalStructureDestructor(size_t bytes)
-{
- return immortalStructureDestructorAllocatorFor(bytes).allocate(bytes);
-}
-
-inline void* MarkedSpace::allocateWithNormalDestructor(size_t bytes)
+inline void* MarkedSpace::allocateWithDestructor(size_t bytes)
{
- return normalDestructorAllocatorFor(bytes).allocate(bytes);
+ return destructorAllocatorFor(bytes).allocate(bytes);
}
template <typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachBlock(Functor& functor)
{
- for (size_t i = 0; i < preciseCount; ++i) {
+ for (size_t i = 0; i < preciseCount; ++i)
m_normalSpace.preciseAllocators[i].forEachBlock(functor);
- m_normalDestructorSpace.preciseAllocators[i].forEachBlock(functor);
- m_immortalStructureDestructorSpace.preciseAllocators[i].forEachBlock(functor);
- }
-
- for (size_t i = 0; i < impreciseCount; ++i) {
+ for (size_t i = 0; i < impreciseCount; ++i)
m_normalSpace.impreciseAllocators[i].forEachBlock(functor);
- m_normalDestructorSpace.impreciseAllocators[i].forEachBlock(functor);
- m_immortalStructureDestructorSpace.impreciseAllocators[i].forEachBlock(functor);
- }
-
m_normalSpace.largeAllocator.forEachBlock(functor);
- m_normalDestructorSpace.largeAllocator.forEachBlock(functor);
- m_immortalStructureDestructorSpace.largeAllocator.forEachBlock(functor);
+
+ for (size_t i = 0; i < preciseCount; ++i)
+ m_destructorSpace.preciseAllocators[i].forEachBlock(functor);
+ for (size_t i = 0; i < impreciseCount; ++i)
+ m_destructorSpace.impreciseAllocators[i].forEachBlock(functor);
+ m_destructorSpace.largeAllocator.forEachBlock(functor);
return functor.returnValue();
}
inline void MarkedSpace::didAddBlock(MarkedBlock* block)
{
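+    // Track total capacity incrementally as blocks are added, instead of
+    // recomputing it by walking every block (see capacity() below).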
+ m_capacity += block->capacity();
m_blocks.add(block);
}
-inline void MarkedSpace::clearMarks()
+inline void MarkedSpace::didAllocateInBlock(MarkedBlock* block)
{
- forEachBlock<ClearMarks>();
+#if ENABLE(GGC)
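+    // With generational GC, remember which blocks gained new objects since
+    // the last collection so they can be revisited without a full heap scan.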
+ m_blocksWithNewObjects.append(block);
+#else
+ UNUSED_PARAM(block);
+#endif
}
inline size_t MarkedSpace::objectCount()
{
    return forEachBlock<MarkCount>();
}
inline size_t MarkedSpace::size()
{
    return forEachBlock<Size>();
}
inline size_t MarkedSpace::capacity()
{
- return forEachBlock<Capacity>();
+ return m_capacity;
}
} // namespace JSC