/*
 * Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
 * Copyright (C) 2001 Peter Kelly (pmk@post.com)
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#ifndef MarkedBlock_h
#define MarkedBlock_h

#include "HeapOperation.h"
#include "IterationStatus.h"
#include "WeakSet.h"
#include <wtf/Bitmap.h>
#include <wtf/DataLog.h>
#include <wtf/DoublyLinkedList.h>
#include <wtf/HashFunctions.h>
#include <wtf/StdLibExtras.h>
#include <wtf/Vector.h>

// Set to log state transitions of blocks.
#define HEAP_LOG_BLOCK_STATE_TRANSITIONS 0

#if HEAP_LOG_BLOCK_STATE_TRANSITIONS
#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) do { \
        dataLogF( \
            "%s:%d %s: block %s = %p, %d\n", \
            __FILE__, __LINE__, __FUNCTION__, \
            #block, (block), (block)->m_state); \
    } while (false)
#else
#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) ((void)0)
#endif

namespace JSC {

class Heap;
class JSCell;
class MarkedAllocator;

typedef uintptr_t Bits;

static const size_t MB = 1024 * 1024;

bool isZapped(const JSCell*);

// A marked block is a page-aligned container for heap-allocated objects.
// Objects are allocated within cells of the marked block. For a given
// marked block, all cells have the same size. Objects smaller than the
// cell size may be allocated in the marked block, in which case the
// allocation suffers from internal fragmentation: wasted space whose
// size is equal to the difference between the cell size and the object
// size.
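//
// For example, in a block whose cell size is 64 bytes, a 40-byte object
// still occupies a full 64-byte cell, wasting 64 - 40 = 24 bytes.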

class MarkedBlock : public DoublyLinkedListNode<MarkedBlock> {
    friend class WTF::DoublyLinkedListNode<MarkedBlock>;
    friend class LLIntOffsetsExtractor;
    friend struct VerifyMarkedOrRetired;
public:
    static const size_t atomSize = 16; // bytes
    static const size_t atomShiftAmount = 4; // log_2(atomSize)
    static const size_t blockSize = 16 * KB;
    static const size_t blockMask = ~(blockSize - 1); // blockSize must be a power of two.

    static const size_t atomsPerBlock = blockSize / atomSize;
    static const size_t atomMask = atomsPerBlock - 1;
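
    // These masks let any interior pointer be decomposed without a table
    // lookup: (p & blockMask) is the owning block, and
    // ((p >> atomShiftAmount) & atomMask) is its atom number within that block.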

    static const size_t markByteShiftAmount = 3; // log_2(bits per mark byte); m_marks stores one bit per atom in uint8_t words.

    struct FreeCell {
        FreeCell* next;
    };

    struct FreeList {
        FreeCell* head;
        size_t bytes;

        FreeList();
        FreeList(FreeCell*, size_t);
    };
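
    // The sweep threads FreeCell links through the block's dead cells; head
    // points at the first free cell and bytes is the total free space. A
    // minimal, hypothetical sketch of popping one cell (the real allocation
    // fast path lives in MarkedAllocator):
    //
    //     MarkedBlock::FreeCell* cell = freeList.head; // take the first dead cell
    //     freeList.head = cell->next;                  // unlink it
    //     // ... construct the new object in cell's memory ...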

    struct VoidFunctor {
        typedef void ReturnType;
        void returnValue() { }
    };

    class CountFunctor {
    public:
        typedef size_t ReturnType;

        CountFunctor() : m_count(0) { }
        void count(size_t count) { m_count += count; }
        ReturnType returnValue() { return m_count; }

    private:
        ReturnType m_count;
    };
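
    // Functors like these are applied by the forEach* templates below. A
    // hypothetical live-cell counter, for illustration only:
    //
    //     struct LiveCellCounter : MarkedBlock::CountFunctor {
    //         IterationStatus operator()(JSCell*) { count(1); return IterationStatus::Continue; }
    //     };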

    static MarkedBlock* create(MarkedAllocator*, size_t capacity, size_t cellSize, bool needsDestruction);
    static void destroy(MarkedBlock*);

    static bool isAtomAligned(const void*);
    static MarkedBlock* blockFor(const void*);
    static size_t firstAtom();

    void lastChanceToFinalize();

    MarkedAllocator* allocator() const;
    Heap* heap() const;
    VM* vm() const;
    WeakSet& weakSet();

    enum SweepMode { SweepOnly, SweepToFreeList };
    FreeList sweep(SweepMode = SweepOnly);

    void shrink();

    void visitWeakSet(HeapRootVisitor&);
    void reapWeakSet();

    // While allocating from a free list, MarkedBlock temporarily has bogus
    // cell liveness data. To restore accurate cell liveness data, call one
    // of these functions:
    void didConsumeFreeList(); // Call this once you've allocated all the items in the free list.
    void stopAllocating(const FreeList&);
    FreeList resumeAllocating(); // Call this if you canonicalized a block for some non-collection-related purpose.
    void didConsumeEmptyFreeList(); // Call this if you sweep a block and the returned free list is empty.

    // Returns true if the "newly allocated" bitmap was non-null
    // and was successfully cleared; returns false otherwise.
    bool clearNewlyAllocated();
    void clearMarks();
    template <HeapOperation collectionType>
    void clearMarksWithCollectionType();

    size_t markCount();
    bool isEmpty();

    size_t cellSize();
    bool needsDestruction() const;

    size_t size();
    size_t capacity();

    bool isMarked(const void*);
    bool testAndSetMarked(const void*);
    bool isLive(const JSCell*);
    bool isLiveCell(const void*);
    bool isMarkedOrNewlyAllocated(const JSCell*);
    void setMarked(const void*);
    void clearMarked(const void*);

    void setRemembered(const void*);
    void clearRemembered(const void*);
    void atomicClearRemembered(const void*);
    bool isRemembered(const void*);

    bool isNewlyAllocated(const void*);
    void setNewlyAllocated(const void*);
    void clearNewlyAllocated(const void*);

    bool isAllocated() const;
    bool needsSweeping();
    void didRetireBlock(const FreeList&);
    void willRemoveBlock();

    template <typename Functor> IterationStatus forEachCell(Functor&);
    template <typename Functor> IterationStatus forEachLiveCell(Functor&);
    template <typename Functor> IterationStatus forEachDeadCell(Functor&);

    static ptrdiff_t offsetOfMarks() { return OBJECT_OFFSETOF(MarkedBlock, m_marks); }

private:
    static const size_t atomAlignmentMask = atomSize - 1; // atomSize must be a power of two.

    enum BlockState { New, FreeListed, Allocated, Marked, Retired };
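
    // Rough lifecycle, as reflected in the assertions below: a block starts
    // New, becomes FreeListed when a sweep builds its free list, Allocated
    // once that free list is consumed, Marked after collection (or when an
    // empty free list is consumed), and Retired when didRetireBlock takes it
    // out of allocation service.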
    template<bool callDestructors> FreeList sweepHelper(SweepMode = SweepOnly);

    typedef char Atom[atomSize];

    MarkedBlock(MarkedAllocator*, size_t capacity, size_t cellSize, bool needsDestruction);
    Atom* atoms();
    size_t atomNumber(const void*);
    void callDestructor(JSCell*);
    template<BlockState, SweepMode, bool callDestructors> FreeList specializedSweep();

    MarkedBlock* m_prev;
    MarkedBlock* m_next;

    size_t m_atomsPerCell;
    size_t m_endAtom; // This is a fuzzy end. Always test for < m_endAtom.
#if ENABLE(PARALLEL_GC)
    WTF::Bitmap<atomsPerBlock, WTF::BitmapAtomic, uint8_t> m_marks;
#else
    WTF::Bitmap<atomsPerBlock, WTF::BitmapNotAtomic, uint8_t> m_marks;
#endif
    std::unique_ptr<WTF::Bitmap<atomsPerBlock>> m_newlyAllocated;

    size_t m_capacity;
    bool m_needsDestruction;
    MarkedAllocator* m_allocator;
    BlockState m_state;
    WeakSet m_weakSet;
};

inline MarkedBlock::FreeList::FreeList()
    : head(0)
    , bytes(0)
{
}

inline MarkedBlock::FreeList::FreeList(FreeCell* head, size_t bytes)
    : head(head)
    , bytes(bytes)
{
}

inline size_t MarkedBlock::firstAtom()
{
    return WTF::roundUpToMultipleOf<atomSize>(sizeof(MarkedBlock)) / atomSize;
}
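
// The MarkedBlock header occupies the first atoms of the block, so the first
// usable atom follows it. For instance, if sizeof(MarkedBlock) were 200
// bytes, firstAtom() would be roundUpToMultipleOf<16>(200) / 16 = 208 / 16 = 13.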

inline MarkedBlock::Atom* MarkedBlock::atoms()
{
    return reinterpret_cast<Atom*>(this);
}

inline bool MarkedBlock::isAtomAligned(const void* p)
{
    return !(reinterpret_cast<Bits>(p) & atomAlignmentMask);
}

inline MarkedBlock* MarkedBlock::blockFor(const void* p)
{
    return reinterpret_cast<MarkedBlock*>(reinterpret_cast<Bits>(p) & blockMask);
}
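
// Because blocks are blockSize-aligned, masking off a pointer's low bits
// recovers its block header. For example, with 16 KB blocks, a cell at
// 0x7f0000104230 belongs to the block at 0x7f0000104000.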

inline MarkedAllocator* MarkedBlock::allocator() const
{
    return m_allocator;
}

inline Heap* MarkedBlock::heap() const
{
    return m_weakSet.heap();
}

inline VM* MarkedBlock::vm() const
{
    return m_weakSet.vm();
}

inline WeakSet& MarkedBlock::weakSet()
{
    return m_weakSet;
}

inline void MarkedBlock::shrink()
{
    m_weakSet.shrink();
}

inline void MarkedBlock::visitWeakSet(HeapRootVisitor& heapRootVisitor)
{
    m_weakSet.visit(heapRootVisitor);
}

inline void MarkedBlock::reapWeakSet()
{
    m_weakSet.reap();
}

inline void MarkedBlock::willRemoveBlock()
{
    ASSERT(m_state != Retired);
}

inline void MarkedBlock::didConsumeFreeList()
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    ASSERT(m_state == FreeListed);
    m_state = Allocated;
}

inline void MarkedBlock::didConsumeEmptyFreeList()
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    ASSERT(!m_newlyAllocated);
    ASSERT(m_state == FreeListed);
    m_state = Marked;
}

inline size_t MarkedBlock::markCount()
{
    return m_marks.count();
}

inline bool MarkedBlock::isEmpty()
{
    return m_marks.isEmpty() && m_weakSet.isEmpty() && (!m_newlyAllocated || m_newlyAllocated->isEmpty());
}

inline size_t MarkedBlock::cellSize()
{
    return m_atomsPerCell * atomSize;
}

inline bool MarkedBlock::needsDestruction() const
{
    return m_needsDestruction;
}

inline size_t MarkedBlock::size()
{
    return markCount() * cellSize();
}

inline size_t MarkedBlock::capacity()
{
    return m_capacity;
}

inline size_t MarkedBlock::atomNumber(const void* p)
{
    return (reinterpret_cast<Bits>(p) - reinterpret_cast<Bits>(this)) / atomSize;
}
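
// Continuing the example above: for a cell at 0x7f0000104230 in the block at
// 0x7f0000104000, the offset is 0x230 bytes, so the atom number is
// 0x230 / 16 = 35.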

inline bool MarkedBlock::isMarked(const void* p)
{
    return m_marks.get(atomNumber(p));
}

inline bool MarkedBlock::testAndSetMarked(const void* p)
{
    return m_marks.concurrentTestAndSet(atomNumber(p));
}

inline void MarkedBlock::setMarked(const void* p)
{
    m_marks.set(atomNumber(p));
}

inline void MarkedBlock::clearMarked(const void* p)
{
    ASSERT(m_marks.get(atomNumber(p)));
    m_marks.clear(atomNumber(p));
}

inline bool MarkedBlock::isNewlyAllocated(const void* p)
{
    return m_newlyAllocated->get(atomNumber(p));
}

inline void MarkedBlock::setNewlyAllocated(const void* p)
{
    m_newlyAllocated->set(atomNumber(p));
}

inline void MarkedBlock::clearNewlyAllocated(const void* p)
{
    m_newlyAllocated->clear(atomNumber(p));
}

inline bool MarkedBlock::clearNewlyAllocated()
{
    if (m_newlyAllocated) {
        m_newlyAllocated = nullptr;
        return true;
    }
    return false;
}

inline bool MarkedBlock::isMarkedOrNewlyAllocated(const JSCell* cell)
{
    ASSERT(m_state == Retired || m_state == Marked);
    return m_marks.get(atomNumber(cell)) || (m_newlyAllocated && isNewlyAllocated(cell));
}

inline bool MarkedBlock::isLive(const JSCell* cell)
{
    switch (m_state) {
    case Allocated:
        return true;

    case Retired:
    case Marked:
        return isMarkedOrNewlyAllocated(cell);

    case New:
    case FreeListed:
        RELEASE_ASSERT_NOT_REACHED();
        return false;
    }

    RELEASE_ASSERT_NOT_REACHED();
    return false;
}

inline bool MarkedBlock::isLiveCell(const void* p)
{
    ASSERT(MarkedBlock::isAtomAligned(p));
    size_t atomNumber = this->atomNumber(p);
    size_t firstAtom = this->firstAtom();
    if (atomNumber < firstAtom) // Filters pointers into MarkedBlock metadata.
        return false;
    if ((atomNumber - firstAtom) % m_atomsPerCell) // Filters pointers into cell middles.
        return false;
    if (atomNumber >= m_endAtom) // Filters pointers past the end of the block.
        return false;

    return isLive(static_cast<const JSCell*>(p));
}
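
// For example, with 4-atom (64-byte) cells and firstAtom() == 13, atom 12
// points into the block header, atom 14 points into the middle of the first
// cell, and only atoms 13, 17, 21, ... below m_endAtom can denote live cells.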

template <typename Functor> inline IterationStatus MarkedBlock::forEachCell(Functor& functor)
{
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
        JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
        if (functor(cell) == IterationStatus::Done)
            return IterationStatus::Done;
    }
    return IterationStatus::Continue;
}

template <typename Functor> inline IterationStatus MarkedBlock::forEachLiveCell(Functor& functor)
{
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
        JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
        if (!isLive(cell))
            continue;

        if (functor(cell) == IterationStatus::Done)
            return IterationStatus::Done;
    }
    return IterationStatus::Continue;
}

template <typename Functor> inline IterationStatus MarkedBlock::forEachDeadCell(Functor& functor)
{
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
        JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
        if (isLive(cell))
            continue;

        if (functor(cell) == IterationStatus::Done)
            return IterationStatus::Done;
    }
    return IterationStatus::Continue;
}

inline bool MarkedBlock::needsSweeping()
{
    return m_state == Marked;
}

inline bool MarkedBlock::isAllocated() const
{
    return m_state == Allocated;
}

} // namespace JSC

namespace WTF {

struct MarkedBlockHash : PtrHash<JSC::MarkedBlock*> {
    static unsigned hash(JSC::MarkedBlock* const& key)
    {
        // Aligned VM regions tend to be monotonically increasing integers,
        // which is a great hash function, but we have to remove the low bits,
        // since they're always zero, which is a terrible hash function!
        return reinterpret_cast<JSC::Bits>(key) / JSC::MarkedBlock::blockSize;
    }
};
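
// For example, block addresses 0x104000, 0x108000 and 0x10c000 divide by the
// 16 KB block size to the consecutive hash values 0x41, 0x42 and 0x43.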

template<> struct DefaultHash<JSC::MarkedBlock*> {
    typedef MarkedBlockHash Hash;
};

} // namespace WTF

#endif // MarkedBlock_h