/*
 * Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
 * Copyright (C) 2001 Peter Kelly (pmk@post.com)
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#ifndef MarkedBlock_h
#define MarkedBlock_h

#include "BlockAllocator.h"
#include "HeapBlock.h"

#include "HeapOperation.h"
#include "WeakSet.h"
#include <wtf/Bitmap.h>
#include <wtf/DataLog.h>
#include <wtf/DoublyLinkedList.h>
#include <wtf/HashFunctions.h>
#include <wtf/PageAllocationAligned.h>
#include <wtf/StdLibExtras.h>
#include <wtf/Vector.h>

// Set to log state transitions of blocks.
#define HEAP_LOG_BLOCK_STATE_TRANSITIONS 0

#if HEAP_LOG_BLOCK_STATE_TRANSITIONS
#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) do { \
        dataLogF( \
            "%s:%d %s: block %s = %p, %d\n", \
            __FILE__, __LINE__, __FUNCTION__, \
            #block, (block), (block)->m_state); \
    } while (false)
#else
#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) ((void)0)
#endif

namespace JSC {

class Heap;
class JSCell;
class MarkedAllocator;

typedef uintptr_t Bits;

static const size_t MB = 1024 * 1024;

bool isZapped(const JSCell*);

// A marked block is a page-aligned container for heap-allocated objects.
// Objects are allocated within cells of the marked block. For a given
// marked block, all cells have the same size. Objects smaller than the
// cell size may be allocated in the marked block, in which case the
// allocation suffers from internal fragmentation: wasted space whose
// size is equal to the difference between the cell size and the object
// size.
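//
// For instance (illustrative numbers only): in a block whose cells are
// 48 bytes, a 40-byte object wastes 48 - 40 = 8 bytes per allocation;
// an object larger than 48 bytes would instead have to come from a block
// with a larger cell size.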

class MarkedBlock : public HeapBlock<MarkedBlock> {
    friend class LLIntOffsetsExtractor;
    friend struct VerifyMarkedOrRetired;
public:
    static const size_t atomSize = 16; // bytes
    static const size_t atomShiftAmount = 4; // log_2(atomSize)
    static const size_t blockSize = 64 * KB;
    static const size_t blockMask = ~(blockSize - 1); // blockSize must be a power of two.

    static const size_t atomsPerBlock = blockSize / atomSize;
    static const size_t atomMask = atomsPerBlock - 1;

    static const size_t markByteShiftAmount = 3; // log_2(word size for m_marks, in bits)
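
    // With these values: blockSize = 64 * 1024 = 65536 bytes, so a block
    // holds atomsPerBlock = 65536 / 16 = 4096 atoms, atomMask = 0xFFF, and
    // blockMask clears the low 16 bits of a pointer (see blockFor() below).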

    struct FreeCell {
        FreeCell* next;
    };

    struct FreeList {
        FreeCell* head;
        size_t bytes;

        FreeList();
        FreeList(FreeCell*, size_t);
    };

    struct VoidFunctor {
        typedef void ReturnType;
        void returnValue() { }
    };

    class CountFunctor {
    public:
        typedef size_t ReturnType;

        CountFunctor() : m_count(0) { }
        void count(size_t count) { m_count += count; }
        ReturnType returnValue() { return m_count; }

    private:
        ReturnType m_count;
    };

    enum DestructorType { None, ImmortalStructure, Normal };
    static MarkedBlock* create(DeadBlock*, MarkedAllocator*, size_t cellSize, DestructorType);

    static bool isAtomAligned(const void*);
    static MarkedBlock* blockFor(const void*);
    static size_t firstAtom();

    void lastChanceToFinalize();

    MarkedAllocator* allocator() const;
    Heap* heap() const;
    VM* vm() const;
    WeakSet& weakSet();

    enum SweepMode { SweepOnly, SweepToFreeList };
    FreeList sweep(SweepMode = SweepOnly);

    void shrink();

    void visitWeakSet(HeapRootVisitor&);
    void reapWeakSet();

    // While allocating from a free list, MarkedBlock temporarily has bogus
    // cell liveness data. To restore accurate cell liveness data, call one
    // of these functions:
    void didConsumeFreeList(); // Call this once you've allocated all the items in the free list.
    void stopAllocating(const FreeList&);
    FreeList resumeAllocating(); // Call this if you canonicalized a block for some non-collection related purpose.
    void didConsumeEmptyFreeList(); // Call this if you sweep a block, but the returned FreeList is empty.
    void didSweepToNoAvail(); // Call this if you sweep a block and get an empty free list back.
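
    // A typical allocation cycle, sketched (caller code is illustrative, not
    // the real MarkedAllocator implementation):
    //
    //     MarkedBlock::FreeList freeList = block->sweep(MarkedBlock::SweepToFreeList);
    //     // ... pop cells off freeList.head and hand them out ...
    //     block->didConsumeFreeList(); // liveness data is accurate again
    //
    // If allocation has to stop before the free list is exhausted (e.g. for a
    // collection), pass the remainder back via stopAllocating() instead.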

    // Returns true if the "newly allocated" bitmap was non-null and was
    // successfully cleared; returns false otherwise.
    bool clearNewlyAllocated();
    void clearMarks();
    void clearRememberedSet();
    template <HeapOperation collectionType>
    void clearMarksWithCollectionType();

    size_t markCount();
    bool isEmpty();

    size_t cellSize();
    DestructorType destructorType();

    size_t size();
    size_t capacity();

    bool isMarked(const void*);
    bool testAndSetMarked(const void*);
    bool isLive(const JSCell*);
    bool isLiveCell(const void*);
    void setMarked(const void*);
    void clearMarked(const void*);

    void setRemembered(const void*);
    void clearRemembered(const void*);
    void atomicClearRemembered(const void*);
    bool isRemembered(const void*);

    bool isNewlyAllocated(const void*);
    void setNewlyAllocated(const void*);
    void clearNewlyAllocated(const void*);

    bool needsSweeping();
    void didRetireBlock(const FreeList&);
    void willRemoveBlock();

    template <typename Functor> void forEachCell(Functor&);
    template <typename Functor> void forEachLiveCell(Functor&);
    template <typename Functor> void forEachDeadCell(Functor&);

    static ptrdiff_t offsetOfMarks() { return OBJECT_OFFSETOF(MarkedBlock, m_marks); }

private:
    static const size_t atomAlignmentMask = atomSize - 1; // atomSize must be a power of two.

    enum BlockState { New, FreeListed, Allocated, Marked, Retired };
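    // Rough meaning of the states, as used in this header: New blocks have
    // just been created; FreeListed blocks have handed out a free list and
    // have bogus liveness data; Allocated blocks have consumed their free
    // list, so every cell is treated as live; Marked (and Retired) blocks
    // rely on the mark bits (plus m_newlyAllocated) for liveness, and Marked
    // blocks are the ones needsSweeping() reports as needing a sweep.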
    template<DestructorType> FreeList sweepHelper(SweepMode = SweepOnly);

    typedef char Atom[atomSize];

    MarkedBlock(Region*, MarkedAllocator*, size_t cellSize, DestructorType);
    Atom* atoms();
    size_t atomNumber(const void*);
    template<DestructorType> void callDestructor(JSCell*);
    template<BlockState, SweepMode, DestructorType> FreeList specializedSweep();

    size_t m_atomsPerCell;
    size_t m_endAtom; // This is a fuzzy end. Always test for < m_endAtom.
#if ENABLE(PARALLEL_GC)
    WTF::Bitmap<atomsPerBlock, WTF::BitmapAtomic, uint8_t> m_marks;
    WTF::Bitmap<atomsPerBlock, WTF::BitmapAtomic, uint8_t> m_rememberedSet;
#else
    WTF::Bitmap<atomsPerBlock, WTF::BitmapNotAtomic, uint8_t> m_marks;
    WTF::Bitmap<atomsPerBlock, WTF::BitmapNotAtomic, uint8_t> m_rememberedSet;
#endif
    OwnPtr<WTF::Bitmap<atomsPerBlock>> m_newlyAllocated;

    DestructorType m_destructorType;
    MarkedAllocator* m_allocator;
    BlockState m_state;
    WeakSet m_weakSet;
};

inline MarkedBlock::FreeList::FreeList()
    : head(0)
    , bytes(0)
{
}

inline MarkedBlock::FreeList::FreeList(FreeCell* head, size_t bytes)
    : head(head)
    , bytes(bytes)
{
}

inline size_t MarkedBlock::firstAtom()
{
    return WTF::roundUpToMultipleOf<atomSize>(sizeof(MarkedBlock)) / atomSize;
}
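
// For illustration only: if sizeof(MarkedBlock) were, say, 250 bytes, the
// header would round up to 256 bytes = 16 atoms, so firstAtom() would return
// 16 and the first usable cell would begin at atom 16.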

inline MarkedBlock::Atom* MarkedBlock::atoms()
{
    return reinterpret_cast<Atom*>(this);
}

inline bool MarkedBlock::isAtomAligned(const void* p)
{
    return !(reinterpret_cast<Bits>(p) & atomAlignmentMask);
}

inline MarkedBlock* MarkedBlock::blockFor(const void* p)
{
    return reinterpret_cast<MarkedBlock*>(reinterpret_cast<Bits>(p) & blockMask);
}
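
// Because blocks are 64 KB-aligned, masking with blockMask recovers the
// owning block from any interior pointer. Illustrative addresses: a cell at
// 0x10642a4c8 masks down to 0x106420000, the start of its MarkedBlock.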

inline MarkedAllocator* MarkedBlock::allocator() const
{
    return m_allocator;
}

inline Heap* MarkedBlock::heap() const
{
    return m_weakSet.heap();
}

inline VM* MarkedBlock::vm() const
{
    return m_weakSet.vm();
}

inline WeakSet& MarkedBlock::weakSet()
{
    return m_weakSet;
}

inline void MarkedBlock::shrink()
{
    m_weakSet.shrink();
}

inline void MarkedBlock::visitWeakSet(HeapRootVisitor& heapRootVisitor)
{
    m_weakSet.visit(heapRootVisitor);
}

inline void MarkedBlock::reapWeakSet()
{
    m_weakSet.reap();
}

inline void MarkedBlock::willRemoveBlock()
{
    ASSERT(m_state != Retired);
}

inline void MarkedBlock::didConsumeFreeList()
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    ASSERT(m_state == FreeListed);
    m_state = Allocated;
}

inline void MarkedBlock::didConsumeEmptyFreeList()
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    ASSERT(!m_newlyAllocated);
    ASSERT(m_state == FreeListed);
    m_state = Marked;
}

inline size_t MarkedBlock::markCount()
{
    return m_marks.count();
}

inline bool MarkedBlock::isEmpty()
{
    return m_marks.isEmpty() && m_weakSet.isEmpty() && (!m_newlyAllocated || m_newlyAllocated->isEmpty());
}

inline size_t MarkedBlock::cellSize()
{
    return m_atomsPerCell * atomSize;
}

inline MarkedBlock::DestructorType MarkedBlock::destructorType()
{
    return m_destructorType;
}

inline size_t MarkedBlock::size()
{
    return markCount() * cellSize();
}

inline size_t MarkedBlock::capacity()
{
    return region()->blockSize();
}

inline size_t MarkedBlock::atomNumber(const void* p)
{
    return (reinterpret_cast<Bits>(p) - reinterpret_cast<Bits>(this)) / atomSize;
}
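
// For example (illustrative): a cell 0x230 bytes = 560 bytes past the start
// of its block lives at atom 560 / 16 = 35; the mark, remembered-set, and
// newly-allocated bitmaps below are all indexed by this atom number.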

inline void MarkedBlock::setRemembered(const void* p)
{
    m_rememberedSet.set(atomNumber(p));
}

inline void MarkedBlock::clearRemembered(const void* p)
{
    m_rememberedSet.clear(atomNumber(p));
}

inline void MarkedBlock::atomicClearRemembered(const void* p)
{
    m_rememberedSet.concurrentTestAndClear(atomNumber(p));
}

inline bool MarkedBlock::isRemembered(const void* p)
{
    return m_rememberedSet.get(atomNumber(p));
}

inline bool MarkedBlock::isMarked(const void* p)
{
    return m_marks.get(atomNumber(p));
}

inline bool MarkedBlock::testAndSetMarked(const void* p)
{
    return m_marks.concurrentTestAndSet(atomNumber(p));
}

inline void MarkedBlock::setMarked(const void* p)
{
    m_marks.set(atomNumber(p));
}

inline void MarkedBlock::clearMarked(const void* p)
{
    ASSERT(m_marks.get(atomNumber(p)));
    m_marks.clear(atomNumber(p));
}

inline bool MarkedBlock::isNewlyAllocated(const void* p)
{
    return m_newlyAllocated->get(atomNumber(p));
}

inline void MarkedBlock::setNewlyAllocated(const void* p)
{
    m_newlyAllocated->set(atomNumber(p));
}

inline void MarkedBlock::clearNewlyAllocated(const void* p)
{
    m_newlyAllocated->clear(atomNumber(p));
}

inline bool MarkedBlock::clearNewlyAllocated()
{
    if (m_newlyAllocated) {
        m_newlyAllocated.clear();
        return true;
    }
    return false;
}

inline bool MarkedBlock::isLive(const JSCell* cell)
{
    switch (m_state) {
    case Allocated:
        return true;

    case Retired:
    case Marked:
        return m_marks.get(atomNumber(cell)) || (m_newlyAllocated && isNewlyAllocated(cell));

    case New:
    case FreeListed:
        RELEASE_ASSERT_NOT_REACHED();
        return false;
    }

    RELEASE_ASSERT_NOT_REACHED();
    return false;
}

inline bool MarkedBlock::isLiveCell(const void* p)
{
    ASSERT(MarkedBlock::isAtomAligned(p));
    size_t atomNumber = this->atomNumber(p);
    size_t firstAtom = this->firstAtom();
    if (atomNumber < firstAtom) // Filters pointers into MarkedBlock metadata.
        return false;
    if ((atomNumber - firstAtom) % m_atomsPerCell) // Filters pointers into cell middles.
        return false;
    if (atomNumber >= m_endAtom) // Filters pointers past the end of the valid cell range.
        return false;

    return isLive(static_cast<const JSCell*>(p));
}
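
// Worked example with illustrative numbers: if firstAtom() is 16 and
// m_atomsPerCell is 4, a pointer at atom 18 is rejected by the second check
// ((18 - 16) % 4 != 0, i.e. the middle of a cell), while a pointer at atom 20
// sits on a cell boundary and falls through to the isLive() state check.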

template <typename Functor> inline void MarkedBlock::forEachCell(Functor& functor)
{
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
        JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
        functor(cell);
    }
}

template <typename Functor> inline void MarkedBlock::forEachLiveCell(Functor& functor)
{
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
        JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
        if (!isLive(cell))
            continue;

        functor(cell);
    }
}

template <typename Functor> inline void MarkedBlock::forEachDeadCell(Functor& functor)
{
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
        JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
        if (isLive(cell))
            continue;

        functor(cell);
    }
}
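
// Sketch of a caller-side functor (hypothetical; not part of this header).
// Any type whose operator()(JSCell*) is callable works; CountFunctor just
// supplies the count/returnValue bookkeeping:
//
//     struct LiveCellCounter : MarkedBlock::CountFunctor {
//         void operator()(JSCell*) { count(1); }
//     };
//
//     LiveCellCounter counter;
//     block->forEachLiveCell(counter);
//     size_t liveCells = counter.returnValue();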

inline bool MarkedBlock::needsSweeping()
{
    return m_state == Marked;
}

} // namespace JSC

namespace WTF {

struct MarkedBlockHash : PtrHash<JSC::MarkedBlock*> {
    static unsigned hash(JSC::MarkedBlock* const& key)
    {
        // Aligned VM regions tend to be monotonically increasing integers,
        // which is a great hash function, but we have to remove the low bits,
        // since they're always zero, which is a terrible hash function!
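        // For instance (illustrative addresses), 64 KB-aligned blocks at
        // 0x10000 and 0x20000 hash to 1 and 2 instead of sharing all-zero
        // low bits.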
        return reinterpret_cast<JSC::Bits>(key) / JSC::MarkedBlock::blockSize;
    }
};

template<> struct DefaultHash<JSC::MarkedBlock*> {
    typedef MarkedBlockHash Hash;
};

} // namespace WTF

#endif // MarkedBlock_h