/*
 *  Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
 *  Copyright (C) 2001 Peter Kelly (pmk@post.com)
 *  Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#ifndef MarkedBlock_h
#define MarkedBlock_h

#include "BlockAllocator.h"
#include "HeapBlock.h"
#include "WeakSet.h"

#include <wtf/Bitmap.h>
#include <wtf/DataLog.h>
#include <wtf/DoublyLinkedList.h>
#include <wtf/HashFunctions.h>
#include <wtf/PageAllocationAligned.h>
#include <wtf/StdLibExtras.h>
#include <wtf/Vector.h>

// Set to log state transitions of blocks.
#define HEAP_LOG_BLOCK_STATE_TRANSITIONS 0

#if HEAP_LOG_BLOCK_STATE_TRANSITIONS
#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) do {                    \
        dataLogF(                                                      \
            "%s:%d %s: block %s = %p, %d\n",                           \
            __FILE__, __LINE__, __FUNCTION__,                          \
            #block, (block), (block)->m_state);                        \
    } while (false)
#else
#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) ((void)0)
#endif

namespace JSC {

class Heap;
class HeapRootVisitor;
class JSCell;
class MarkedAllocator;
class VM;

typedef uintptr_t Bits;

static const size_t MB = 1024 * 1024;

bool isZapped(const JSCell*);

// A marked block is a page-aligned container for heap-allocated objects.
// Objects are allocated within cells of the marked block. For a given
// marked block, all cells have the same size. Objects smaller than the
// cell size may be allocated in the marked block, in which case the
// allocation suffers from internal fragmentation: wasted space whose
// size is equal to the difference between the cell size and the object
// size.
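//
// For illustration only (hypothetical sizes, not values defined in this
// header): if a block's cells are 64 bytes and a 40-byte object is placed in
// one of them, the allocation wastes 64 - 40 = 24 bytes to internal
// fragmentation.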
class MarkedBlock : public HeapBlock<MarkedBlock> {
public:
    static const size_t atomSize = 8; // bytes
    static const size_t blockSize = 64 * KB;
    static const size_t blockMask = ~(blockSize - 1); // blockSize must be a power of two.

    static const size_t atomsPerBlock = blockSize / atomSize;
    static const size_t atomMask = atomsPerBlock - 1;
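    // Worked example of the constants above: blockSize = 64 * KB = 65536 bytes
    // and atomSize = 8 bytes, so atomsPerBlock = 65536 / 8 = 8192 and
    // atomMask = 8191; an atom index always fits in 13 bits.
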
    struct FreeCell {
        FreeCell* next; // Next free cell in the block's free list.
    };

    struct FreeList {
        FreeCell* head;
        size_t bytes;

        FreeList();
        FreeList(FreeCell*, size_t);
    };

    struct VoidFunctor {
        typedef void ReturnType;
        void returnValue() { }
    };

    struct CountFunctor {
        typedef size_t ReturnType;

        CountFunctor() : m_count(0) { }
        void count(size_t count) { m_count += count; }
        ReturnType returnValue() { return m_count; }

    private:
        ReturnType m_count;
    };

    enum DestructorType { None, ImmortalStructure, Normal };
    static MarkedBlock* create(DeadBlock*, MarkedAllocator*, size_t cellSize, DestructorType);

    static bool isAtomAligned(const void*);
    static MarkedBlock* blockFor(const void*);
    static size_t firstAtom();

    void lastChanceToFinalize();

    MarkedAllocator* allocator() const;
    Heap* heap() const;
    VM* vm() const;
    WeakSet& weakSet();

    enum SweepMode { SweepOnly, SweepToFreeList };
    FreeList sweep(SweepMode = SweepOnly);

    void shrink();

    void visitWeakSet(HeapRootVisitor&);
    void reapWeakSet();

    // While allocating from a free list, MarkedBlock temporarily has bogus
    // cell liveness data. To restore accurate cell liveness data, call one
    // of these functions:
    void didConsumeFreeList(); // Call this once you've allocated all the items in the free list.
    void canonicalizeCellLivenessData(const FreeList&);
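    //
    // Illustrative call sequence (a sketch only; "block" and the surrounding
    // allocator code are assumed, not defined in this header):
    //
    //     MarkedBlock::FreeList freeList = block->sweep(MarkedBlock::SweepToFreeList);
    //     // ... hand out cells from freeList; liveness data is bogus meanwhile ...
    //     block->didConsumeFreeList();                          // every item was allocated
    //     // or: block->canonicalizeCellLivenessData(freeList); // some items were not
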
    void clearMarks();
    size_t markCount();
    bool isEmpty();

    size_t cellSize();
    DestructorType destructorType();

    size_t size();
    size_t capacity();

    bool isMarked(const void*);
    bool testAndSetMarked(const void*);
    bool isLive(const JSCell*);
    bool isLiveCell(const void*);
    void setMarked(const void*);
    void clearMarked(const void*);

    bool isNewlyAllocated(const void*);
    void setNewlyAllocated(const void*);
    void clearNewlyAllocated(const void*);

    bool needsSweeping();

    template <typename Functor> void forEachCell(Functor&);
    template <typename Functor> void forEachLiveCell(Functor&);
    template <typename Functor> void forEachDeadCell(Functor&);

private:
    static const size_t atomAlignmentMask = atomSize - 1; // atomSize must be a power of two.

    enum BlockState { New, FreeListed, Allocated, Marked };
    template<DestructorType> FreeList sweepHelper(SweepMode = SweepOnly);

    typedef char Atom[atomSize];

    MarkedBlock(Region*, MarkedAllocator*, size_t cellSize, DestructorType);
    Atom* atoms();
    size_t atomNumber(const void*);
    void callDestructor(JSCell*);
    template<BlockState, SweepMode, DestructorType> FreeList specializedSweep();

    size_t m_atomsPerCell;
    size_t m_endAtom; // This is a fuzzy end. Always test for < m_endAtom.
#if ENABLE(PARALLEL_GC)
    WTF::Bitmap<atomsPerBlock, WTF::BitmapAtomic> m_marks;
#else
    WTF::Bitmap<atomsPerBlock, WTF::BitmapNotAtomic> m_marks;
#endif
    OwnPtr<WTF::Bitmap<atomsPerBlock> > m_newlyAllocated;

    DestructorType m_destructorType;
    MarkedAllocator* m_allocator;
    BlockState m_state;
    WeakSet m_weakSet;
};

inline MarkedBlock::FreeList::FreeList()
    : head(0)
    , bytes(0)
{
}

inline MarkedBlock::FreeList::FreeList(FreeCell* head, size_t bytes)
    : head(head)
    , bytes(bytes)
{
}

inline size_t MarkedBlock::firstAtom()
{
    return WTF::roundUpToMultipleOf<atomSize>(sizeof(MarkedBlock)) / atomSize;
}

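// Worked example with a hypothetical sizeof(MarkedBlock) of 200 bytes:
// roundUpToMultipleOf<8>(200) / 8 = 200 / 8 = 25, so the first 25 atoms hold
// block metadata and object cells begin at atom index 25.
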
inline MarkedBlock::Atom* MarkedBlock::atoms()
{
    return reinterpret_cast<Atom*>(this);
}

inline bool MarkedBlock::isAtomAligned(const void* p)
{
    return !(reinterpret_cast<Bits>(p) & atomAlignmentMask);
}

inline MarkedBlock* MarkedBlock::blockFor(const void* p)
{
    return reinterpret_cast<MarkedBlock*>(reinterpret_cast<Bits>(p) & blockMask);
}

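// Worked example of the mask arithmetic (hypothetical address): with
// blockSize = 0x10000, blockMask clears the low 16 bits of a pointer, so
// 0x7f0000012345 maps to the block starting at 0x7f0000010000.
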
inline void MarkedBlock::lastChanceToFinalize()
{
    m_weakSet.lastChanceToFinalize();
}

inline MarkedAllocator* MarkedBlock::allocator() const
{
    return m_allocator;
}

inline Heap* MarkedBlock::heap() const
{
    return m_weakSet.heap();
}

inline VM* MarkedBlock::vm() const
{
    return m_weakSet.vm();
}

inline WeakSet& MarkedBlock::weakSet()
{
    return m_weakSet;
}

inline void MarkedBlock::shrink()
{
    m_weakSet.shrink();
}

inline void MarkedBlock::visitWeakSet(HeapRootVisitor& heapRootVisitor)
{
    m_weakSet.visit(heapRootVisitor);
}

inline void MarkedBlock::reapWeakSet()
{
    m_weakSet.reap();
}

inline void MarkedBlock::didConsumeFreeList()
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    ASSERT(m_state == FreeListed);
    m_state = Allocated;
}

inline void MarkedBlock::clearMarks()
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    ASSERT(m_state != New && m_state != FreeListed);
    m_marks.clearAll();
    m_newlyAllocated.clear();

    // This will become true at the end of the mark phase. We set it now to
    // avoid an extra pass to do so later.
    m_state = Marked;
}

inline size_t MarkedBlock::markCount()
{
    return m_marks.count();
}

inline bool MarkedBlock::isEmpty()
{
    return m_marks.isEmpty() && m_weakSet.isEmpty() && (!m_newlyAllocated || m_newlyAllocated->isEmpty());
}

inline size_t MarkedBlock::cellSize()
{
    return m_atomsPerCell * atomSize;
}

inline MarkedBlock::DestructorType MarkedBlock::destructorType()
{
    return m_destructorType;
}

inline size_t MarkedBlock::size()
{
    return markCount() * cellSize();
}

inline size_t MarkedBlock::capacity()
{
    return region()->blockSize();
}

inline size_t MarkedBlock::atomNumber(const void* p)
{
    return (reinterpret_cast<Bits>(p) - reinterpret_cast<Bits>(this)) / atomSize;
}

inline bool MarkedBlock::isMarked(const void* p)
{
    return m_marks.get(atomNumber(p));
}

inline bool MarkedBlock::testAndSetMarked(const void* p)
{
    return m_marks.concurrentTestAndSet(atomNumber(p));
}

inline void MarkedBlock::setMarked(const void* p)
{
    m_marks.set(atomNumber(p));
}

inline void MarkedBlock::clearMarked(const void* p)
{
    ASSERT(m_marks.get(atomNumber(p)));
    m_marks.clear(atomNumber(p));
}

inline bool MarkedBlock::isNewlyAllocated(const void* p)
{
    return m_newlyAllocated->get(atomNumber(p));
}

inline void MarkedBlock::setNewlyAllocated(const void* p)
{
    m_newlyAllocated->set(atomNumber(p));
}

inline void MarkedBlock::clearNewlyAllocated(const void* p)
{
    m_newlyAllocated->clear(atomNumber(p));
}

inline bool MarkedBlock::isLive(const JSCell* cell)
{
    switch (m_state) {
    case Allocated:
        return true;

    case Marked:
        return m_marks.get(atomNumber(cell)) || (m_newlyAllocated && isNewlyAllocated(cell));

    case New:
    case FreeListed:
        RELEASE_ASSERT_NOT_REACHED();
        return false;
    }

    RELEASE_ASSERT_NOT_REACHED();
    return false;
}

inline bool MarkedBlock::isLiveCell(const void* p)
{
    ASSERT(MarkedBlock::isAtomAligned(p));
    size_t atomNumber = this->atomNumber(p);
    size_t firstAtom = this->firstAtom();
    if (atomNumber < firstAtom) // Filters pointers into MarkedBlock metadata.
        return false;

    if ((atomNumber - firstAtom) % m_atomsPerCell) // Filters pointers into cell middles.
        return false;

    if (atomNumber >= m_endAtom) // Filters pointers into invalid cells out of the range.
        return false;

    return isLive(static_cast<const JSCell*>(p));
}

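// Worked example of the filters above (hypothetical layout): with firstAtom() == 26
// and m_atomsPerCell == 8, atom 3 is rejected as metadata, atom 30 is rejected as a
// cell middle ((30 - 26) % 8 == 4), and atom 34 ((34 - 26) % 8 == 0) passes both
// filters and, if it is also below m_endAtom, is finally checked with isLive().
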
template <typename Functor> inline void MarkedBlock::forEachCell(Functor& functor)
{
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
        JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
        functor(cell);
    }
}

template <typename Functor> inline void MarkedBlock::forEachLiveCell(Functor& functor)
{
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
        JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
        if (!isLive(cell))
            continue;

        functor(cell);
    }
}

template <typename Functor> inline void MarkedBlock::forEachDeadCell(Functor& functor)
{
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
        JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
        if (isLive(cell))
            continue;

        functor(cell);
    }
}

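// Example usage (a sketch; CountLiveCellsFunctor is hypothetical, not part of
// this header). Any functor whose operator() accepts a JSCell* will do:
//
//     struct CountLiveCellsFunctor {
//         size_t count;
//         CountLiveCellsFunctor() : count(0) { }
//         void operator()(JSCell*) { ++count; }
//     };
//
//     CountLiveCellsFunctor functor;
//     block->forEachLiveCell(functor);
//     // functor.count now holds the number of live cells in *block.
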
inline bool MarkedBlock::needsSweeping()
{
    return m_state == Marked;
}

} // namespace JSC

namespace WTF {

struct MarkedBlockHash : PtrHash<JSC::MarkedBlock*> {
    static unsigned hash(JSC::MarkedBlock* const& key)
    {
        // Aligned VM regions tend to be monotonically increasing integers,
        // which is a great hash function, but we have to remove the low bits,
        // since they're always zero, which is a terrible hash function!
        return reinterpret_cast<JSC::Bits>(key) / JSC::MarkedBlock::blockSize;
    }
};

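// Worked example (hypothetical addresses): blocks at 0x100000000, 0x100010000 and
// 0x100020000 all have zero low 16 bits; dividing by blockSize (0x10000) maps them
// to the consecutive hash inputs 0x10000, 0x10001 and 0x10002 instead of values
// that are identical in their low bits.
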
template<> struct DefaultHash<JSC::MarkedBlock*> {
    typedef MarkedBlockHash Hash;
};

} // namespace WTF

#endif // MarkedBlock_h