/*
 * Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
 * Copyright (C) 2001 Peter Kelly (pmk@post.com)
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#ifndef MarkedBlock_h
#define MarkedBlock_h

#include "HeapOperation.h"
#include "IterationStatus.h"
#include "WeakSet.h"
#include <wtf/Bitmap.h>
#include <wtf/DataLog.h>
#include <wtf/DoublyLinkedList.h>
#include <wtf/HashFunctions.h>
#include <wtf/StdLibExtras.h>
#include <wtf/Vector.h>

// Set to log state transitions of blocks.
#define HEAP_LOG_BLOCK_STATE_TRANSITIONS 0

#if HEAP_LOG_BLOCK_STATE_TRANSITIONS
#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) do {     \
        dataLogF(                                       \
            "%s:%d %s: block %s = %p, %d\n",            \
            __FILE__, __LINE__, __FUNCTION__,           \
            #block, (block), (block)->m_state);         \
    } while (false)
#else
#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) ((void)0)
#endif

namespace JSC {

    class Heap;
    class JSCell;
    class MarkedAllocator;

    typedef uintptr_t Bits;

    static const size_t MB = 1024 * 1024;

    bool isZapped(const JSCell*);

    // A marked block is a page-aligned container for heap-allocated objects.
    // Objects are allocated within cells of the marked block. For a given
    // marked block, all cells have the same size. Objects smaller than the
    // cell size may be allocated in the marked block, in which case the
    // allocation suffers from internal fragmentation: wasted space whose
    // size is equal to the difference between the cell size and the object
    // size.

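    // For example (illustrative numbers): with atomSize = 16, a 40-byte
    // object must occupy a 48-byte cell (three atoms), losing 8 bytes per
    // object to internal fragmentation.
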
    class MarkedBlock : public DoublyLinkedListNode<MarkedBlock> {
        friend class WTF::DoublyLinkedListNode<MarkedBlock>;
        friend class LLIntOffsetsExtractor;
        friend struct VerifyMarkedOrRetired;
    public:
        static const size_t atomSize = 16; // bytes
        static const size_t atomShiftAmount = 4; // log_2(atomSize)
        static const size_t blockSize = 16 * KB;
        static const size_t blockMask = ~(blockSize - 1); // blockSize must be a power of two.

        static const size_t atomsPerBlock = blockSize / atomSize;
        static const size_t atomMask = atomsPerBlock - 1;

        static const size_t markByteShiftAmount = 3; // log_2(bits per byte): m_marks uses uint8_t words, so each mark byte covers 8 atoms.

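        // With these values, atomsPerBlock = 16 KB / 16 = 1024 atoms, and
        // atomMask = 0x3FF.
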
        struct FreeCell {
            FreeCell* next;
        };

        struct FreeList {
            FreeCell* head;
            size_t bytes;

            FreeList();
            FreeList(FreeCell*, size_t);
        };
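
        // A FreeList is what sweeping produces: dead cells threaded in place
        // through FreeCell::next, with 'bytes' recording their combined
        // capacity (cell count times cellSize()).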

        struct VoidFunctor {
            typedef void ReturnType;
            void returnValue() { }
        };

        class CountFunctor {
        public:
            typedef size_t ReturnType;

            CountFunctor() : m_count(0) { }
            void count(size_t count) { m_count += count; }
            ReturnType returnValue() { return m_count; }

        private:
            ReturnType m_count;
        };
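
        // Usage sketch (CountLiveCells is hypothetical, not part of this
        // header): a counting functor is driven by forEachLiveCell() and its
        // total read back through returnValue().
        //
        //     struct CountLiveCells : MarkedBlock::CountFunctor {
        //         IterationStatus operator()(JSCell*) { count(1); return IterationStatus::Continue; }
        //     };
        //     CountLiveCells functor;
        //     block->forEachLiveCell(functor);
        //     size_t liveCells = functor.returnValue();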

        static MarkedBlock* create(MarkedAllocator*, size_t capacity, size_t cellSize, bool needsDestruction);
        static void destroy(MarkedBlock*);

        static bool isAtomAligned(const void*);
        static MarkedBlock* blockFor(const void*);
        static size_t firstAtom();

        void lastChanceToFinalize();

        MarkedAllocator* allocator() const;
        Heap* heap() const;
        VM* vm() const;
        WeakSet& weakSet();

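        // As the signatures below suggest, both sweep modes reclaim dead
        // cells (running destructors when the block needs destruction);
        // SweepToFreeList additionally hands back the dead cells as a
        // FreeList for allocation, while SweepOnly discards them.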
        enum SweepMode { SweepOnly, SweepToFreeList };
        FreeList sweep(SweepMode = SweepOnly);

        void shrink();

        void visitWeakSet(HeapRootVisitor&);
        void reapWeakSet();

        // While allocating from a free list, MarkedBlock temporarily has bogus
        // cell liveness data. To restore accurate cell liveness data, call one
        // of these functions:
        void didConsumeFreeList(); // Call this once you've allocated all the items in the free list.
        void stopAllocating(const FreeList&);
        FreeList resumeAllocating(); // Call this if you canonicalized a block for some non-collection-related purpose.
        void didConsumeEmptyFreeList(); // Call this if you sweep a block, but the returned FreeList is empty.
        void didSweepToNoAvail(); // Call this if you sweep a block and get an empty free list back.

        // Returns true if the "newly allocated" bitmap was non-null and was
        // successfully cleared; returns false otherwise.
        bool clearNewlyAllocated();
        void clearMarks();
        template <HeapOperation collectionType>
        void clearMarksWithCollectionType();

        size_t markCount();
        bool isEmpty();

        size_t cellSize();
        bool needsDestruction() const;

        size_t size();
        size_t capacity();

        bool isMarked(const void*);
        bool testAndSetMarked(const void*);
        bool isLive(const JSCell*);
        bool isLiveCell(const void*);
        bool isMarkedOrNewlyAllocated(const JSCell*);
        void setMarked(const void*);
        void clearMarked(const void*);

        void setRemembered(const void*);
        void clearRemembered(const void*);
        void atomicClearRemembered(const void*);
        bool isRemembered(const void*);

        bool isNewlyAllocated(const void*);
        void setNewlyAllocated(const void*);
        void clearNewlyAllocated(const void*);

        bool isAllocated() const;
        bool needsSweeping();
        void didRetireBlock(const FreeList&);
        void willRemoveBlock();

        template <typename Functor> IterationStatus forEachCell(Functor&);
        template <typename Functor> IterationStatus forEachLiveCell(Functor&);
        template <typename Functor> IterationStatus forEachDeadCell(Functor&);

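        // Exposes the mark bitmap's offset so that generated code (e.g. via
        // the LLIntOffsetsExtractor friend above) can test mark bits directly
        // from a block pointer.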
        static ptrdiff_t offsetOfMarks() { return OBJECT_OFFSETOF(MarkedBlock, m_marks); }

    private:
        static const size_t atomAlignmentMask = atomSize - 1; // atomSize must be a power of two.

        enum BlockState { New, FreeListed, Allocated, Marked, Retired };
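        // Rough lifecycle, inferred from the transition helpers above: a
        // block is born New, becomes FreeListed when swept to a free list,
        // Allocated once that free list is consumed, Marked after marking (or
        // when the free list was empty), and Retired when withdrawn from
        // normal allocation (see didRetireBlock).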
        template<bool callDestructors> FreeList sweepHelper(SweepMode = SweepOnly);

        typedef char Atom[atomSize];

        MarkedBlock(MarkedAllocator*, size_t capacity, size_t cellSize, bool needsDestruction);
        Atom* atoms();
        size_t atomNumber(const void*);
        void callDestructor(JSCell*);
        template<BlockState, SweepMode, bool callDestructors> FreeList specializedSweep();

        MarkedBlock* m_prev;
        MarkedBlock* m_next;

        size_t m_atomsPerCell;
        size_t m_endAtom; // This is a fuzzy end. Always test for < m_endAtom.
#if ENABLE(PARALLEL_GC)
        WTF::Bitmap<atomsPerBlock, WTF::BitmapAtomic, uint8_t> m_marks;
#else
        WTF::Bitmap<atomsPerBlock, WTF::BitmapNotAtomic, uint8_t> m_marks;
#endif
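        // Created lazily; null is equivalent to an all-clear bitmap (see
        // isEmpty() and isLive()), so callers must null-check before using
        // the per-cell accessors.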
        std::unique_ptr<WTF::Bitmap<atomsPerBlock>> m_newlyAllocated;

        size_t m_capacity;
        bool m_needsDestruction;
        MarkedAllocator* m_allocator;
        BlockState m_state;
        WeakSet m_weakSet;
    };

    inline MarkedBlock::FreeList::FreeList()
        : head(0)
        , bytes(0)
    {
    }

    inline MarkedBlock::FreeList::FreeList(FreeCell* head, size_t bytes)
        : head(head)
        , bytes(bytes)
    {
    }

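    // The MarkedBlock header occupies the start of the block itself (atoms()
    // aliases 'this'), so the first allocatable cell begins at firstAtom().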
    inline size_t MarkedBlock::firstAtom()
    {
        return WTF::roundUpToMultipleOf<atomSize>(sizeof(MarkedBlock)) / atomSize;
    }

    inline MarkedBlock::Atom* MarkedBlock::atoms()
    {
        return reinterpret_cast<Atom*>(this);
    }

    inline bool MarkedBlock::isAtomAligned(const void* p)
    {
        return !(reinterpret_cast<Bits>(p) & atomAlignmentMask);
    }

    inline MarkedBlock* MarkedBlock::blockFor(const void* p)
    {
        return reinterpret_cast<MarkedBlock*>(reinterpret_cast<Bits>(p) & blockMask);
    }

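    // Because blocks are blockSize-aligned, masking off a pointer's low bits
    // recovers its block: with blockSize = 16 KB, blockFor(p) is effectively
    // p & ~0x3FFF.
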
    inline MarkedAllocator* MarkedBlock::allocator() const
    {
        return m_allocator;
    }

    inline Heap* MarkedBlock::heap() const
    {
        return m_weakSet.heap();
    }

    inline VM* MarkedBlock::vm() const
    {
        return m_weakSet.vm();
    }

    inline WeakSet& MarkedBlock::weakSet()
    {
        return m_weakSet;
    }

    inline void MarkedBlock::shrink()
    {
        m_weakSet.shrink();
    }

    inline void MarkedBlock::visitWeakSet(HeapRootVisitor& heapRootVisitor)
    {
        m_weakSet.visit(heapRootVisitor);
    }

    inline void MarkedBlock::reapWeakSet()
    {
        m_weakSet.reap();
    }

    inline void MarkedBlock::willRemoveBlock()
    {
        ASSERT(m_state != Retired);
    }

    inline void MarkedBlock::didConsumeFreeList()
    {
        HEAP_LOG_BLOCK_STATE_TRANSITION(this);

        ASSERT(m_state == FreeListed);
        m_state = Allocated;
    }

    inline void MarkedBlock::didConsumeEmptyFreeList()
    {
        HEAP_LOG_BLOCK_STATE_TRANSITION(this);

        ASSERT(!m_newlyAllocated);
        ASSERT(m_state == FreeListed);
        m_state = Marked;
    }

    inline size_t MarkedBlock::markCount()
    {
        return m_marks.count();
    }

    inline bool MarkedBlock::isEmpty()
    {
        return m_marks.isEmpty() && m_weakSet.isEmpty() && (!m_newlyAllocated || m_newlyAllocated->isEmpty());
    }

    inline size_t MarkedBlock::cellSize()
    {
        return m_atomsPerCell * atomSize;
    }

    inline bool MarkedBlock::needsDestruction() const
    {
        return m_needsDestruction;
    }

    inline size_t MarkedBlock::size()
    {
        return markCount() * cellSize();
    }

    inline size_t MarkedBlock::capacity()
    {
        return m_capacity;
    }

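    // atomNumber() maps an interior pointer to its atom index: the byte
    // offset from the block base, divided by atomSize. For example, a pointer
    // 96 bytes into a block is atom number 6.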
    inline size_t MarkedBlock::atomNumber(const void* p)
    {
        return (reinterpret_cast<Bits>(p) - reinterpret_cast<Bits>(this)) / atomSize;
    }

    inline bool MarkedBlock::isMarked(const void* p)
    {
        return m_marks.get(atomNumber(p));
    }

    inline bool MarkedBlock::testAndSetMarked(const void* p)
    {
        return m_marks.concurrentTestAndSet(atomNumber(p));
    }

    inline void MarkedBlock::setMarked(const void* p)
    {
        m_marks.set(atomNumber(p));
    }

    inline void MarkedBlock::clearMarked(const void* p)
    {
        ASSERT(m_marks.get(atomNumber(p)));
        m_marks.clear(atomNumber(p));
    }

    inline bool MarkedBlock::isNewlyAllocated(const void* p)
    {
        return m_newlyAllocated->get(atomNumber(p));
    }

    inline void MarkedBlock::setNewlyAllocated(const void* p)
    {
        m_newlyAllocated->set(atomNumber(p));
    }

    inline void MarkedBlock::clearNewlyAllocated(const void* p)
    {
        m_newlyAllocated->clear(atomNumber(p));
    }

    inline bool MarkedBlock::clearNewlyAllocated()
    {
        if (m_newlyAllocated) {
            m_newlyAllocated = nullptr;
            return true;
        }
        return false;
    }

    inline bool MarkedBlock::isMarkedOrNewlyAllocated(const JSCell* cell)
    {
        ASSERT(m_state == Retired || m_state == Marked);
        return m_marks.get(atomNumber(cell)) || (m_newlyAllocated && isNewlyAllocated(cell));
    }

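    // Liveness depends on block state: per-cell data is bogus while a block
    // is being allocated from (see the note above didConsumeFreeList), so an
    // Allocated block conservatively reports every cell live.
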
    inline bool MarkedBlock::isLive(const JSCell* cell)
    {
        switch (m_state) {
        case Allocated:
            return true;

        case Retired:
        case Marked:
            return isMarkedOrNewlyAllocated(cell);

        case New:
        case FreeListed:
            RELEASE_ASSERT_NOT_REACHED();
            return false;
        }

        RELEASE_ASSERT_NOT_REACHED();
        return false;
    }

    inline bool MarkedBlock::isLiveCell(const void* p)
    {
        ASSERT(MarkedBlock::isAtomAligned(p));
        size_t atomNumber = this->atomNumber(p);
        size_t firstAtom = this->firstAtom();
        if (atomNumber < firstAtom) // Filters pointers into MarkedBlock metadata.
            return false;
        if ((atomNumber - firstAtom) % m_atomsPerCell) // Filters pointers into cell middles.
            return false;
        if (atomNumber >= m_endAtom) // Filters pointers past the end of the allocatable region.
            return false;

        return isLive(static_cast<const JSCell*>(p));
    }

    template <typename Functor> inline IterationStatus MarkedBlock::forEachCell(Functor& functor)
    {
        for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
            JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
            if (functor(cell) == IterationStatus::Done)
                return IterationStatus::Done;
        }
        return IterationStatus::Continue;
    }

    template <typename Functor> inline IterationStatus MarkedBlock::forEachLiveCell(Functor& functor)
    {
        for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
            JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
            if (!isLive(cell))
                continue;

            if (functor(cell) == IterationStatus::Done)
                return IterationStatus::Done;
        }
        return IterationStatus::Continue;
    }

    template <typename Functor> inline IterationStatus MarkedBlock::forEachDeadCell(Functor& functor)
    {
        for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
            JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
            if (isLive(cell))
                continue;

            if (functor(cell) == IterationStatus::Done)
                return IterationStatus::Done;
        }
        return IterationStatus::Continue;
    }

    inline bool MarkedBlock::needsSweeping()
    {
        return m_state == Marked;
    }

    inline bool MarkedBlock::isAllocated() const
    {
        return m_state == Allocated;
    }

} // namespace JSC

namespace WTF {

    struct MarkedBlockHash : PtrHash<JSC::MarkedBlock*> {
        static unsigned hash(JSC::MarkedBlock* const& key)
        {
            // Aligned VM regions tend to be monotonically increasing integers,
            // which is a great hash function, but we have to remove the low bits,
            // since they're always zero, which is a terrible hash function!
            return reinterpret_cast<JSC::Bits>(key) / JSC::MarkedBlock::blockSize;
        }
    };

    template<> struct DefaultHash<JSC::MarkedBlock*> {
        typedef MarkedBlockHash Hash;
    };

} // namespace WTF

#endif // MarkedBlock_h