/*
 *  Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
 *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */
#include "Collector.h"

#include "CallFrame.h"
#include "CodeBlock.h"
#include "CollectorHeapIterator.h"
#include "Interpreter.h"
#include "JSGlobalObject.h"
#include "JSONObject.h"
#include "MarkStack.h"
#include <wtf/FastMalloc.h>
#include <wtf/HashCountedSet.h>
#include <wtf/UnusedParam.h>
#include <wtf/VMTags.h>
#if OS(DARWIN)
#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <mach/vm_map.h>
#endif
#if HAVE(PTHREAD_NP_H)
#include <pthread_np.h>
#endif

#include <sys/procfs.h>
#define COLLECT_ON_EVERY_ALLOCATION 0

using std::max;

namespace JSC {

// tunable parameters

const size_t GROWTH_FACTOR = 2;
const size_t LOW_WATER_FACTOR = 4;
const size_t ALLOCATIONS_PER_COLLECTION = 3600;
// This value has to be a macro to be used in max() without introducing
// a PIC branch in Mach-O binaries, see <rdar://problem/5971391>.
#define MIN_ARRAY_SIZE (static_cast<size_t>(14))
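// How these knobs are used below: GROWTH_FACTOR multiplies the block-pointer array
// whenever it fills up, LOW_WATER_FACTOR shrinks that array again once fewer than
// numBlocks / LOW_WATER_FACTOR entries are in use, and ALLOCATIONS_PER_COLLECTION is
// the minimum headroom (in cells) that resizeBlocks() keeps available after a collection.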
#if OS(SYMBIAN)
const size_t MAX_NUM_BLOCKS = 256; // Max size of collector heap set to 16 MB
static RHeap* userChunk = 0;
#endif
#if ENABLE(JSC_MULTIPLE_THREADS)

#if OS(DARWIN)
typedef mach_port_t PlatformThread;
#elif OS(WINDOWS)
typedef HANDLE PlatformThread;
#endif

class Heap::Thread {
public:
    Thread(pthread_t pthread, const PlatformThread& platThread, void* base)
        : posixThread(pthread)
        , platformThread(platThread)
        , stackBase(base)
    {
    }

    Thread* next;
    pthread_t posixThread;
    PlatformThread platformThread;
    void* stackBase;
};

#endif
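// Registered threads form a singly linked list headed by m_registeredThreads:
// registerThread() pushes the calling thread, unregisterThread() unlinks it, and
// the collector walks the list to suspend and conservatively scan every thread
// that has used the heap.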
Heap::Heap(JSGlobalData* globalData)
    : m_markListSet(0)
#if ENABLE(JSC_MULTIPLE_THREADS)
    , m_registeredThreads(0)
    , m_currentThreadRegistrar(0)
#endif
    , m_globalData(globalData)
{
#if OS(SYMBIAN)
    // Symbian OpenC supports mmap but currently not the MAP_ANON flag.
    // Using fastMalloc() does not properly align blocks on 64 KB boundaries,
    // and the previous implementation was flawed/incomplete.
    // UserHeap::ChunkHeap allows allocation of contiguous memory and lets us
    // specify the alignment of the (Symbian) cells within that heap.
    //
    // Clarification and mapping of terminology:
    // RHeap (created by UserHeap::ChunkHeap below) is a contiguous memory chunk,
    // which can dynamically grow up to 8 MB, that holds all CollectorBlocks of
    // this session (static). Each Symbian cell within the RHeap maps to a 64 KB
    // aligned CollectorBlock. JSCell objects are maintained as usual within
    // CollectorBlocks.
    userChunk = UserHeap::ChunkHeap(0, 0, MAX_NUM_BLOCKS * BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE);
#endif // OS(SYMBIAN)

    memset(&m_heap, 0, sizeof(CollectorHeap));
    allocateBlock();
}
Heap::~Heap()
{
    // The destroy function must already have been called, so assert this.
    ASSERT(!m_globalData);
}

void Heap::destroy()
{
    JSLock lock(SilenceAssertionsOnly);

    if (!m_globalData)
        return;

    ASSERT(!m_globalData->dynamicGlobalObject);

    // The global object is not GC protected at this point, so sweeping may delete it
    // (and thus the global data) before other objects that may use the global data.
    RefPtr<JSGlobalData> protect(m_globalData);

    delete m_markListSet;
    m_markListSet = 0;

    freeBlocks();
#if ENABLE(JSC_MULTIPLE_THREADS)
    if (m_currentThreadRegistrar) {
        int error = pthread_key_delete(m_currentThreadRegistrar);
        ASSERT_UNUSED(error, !error);
    }

    MutexLocker registeredThreadsLock(m_registeredThreadsMutex);
    for (Heap::Thread* t = m_registeredThreads; t;) {
        Heap::Thread* next = t->next;
        delete t;
        t = next;
    }
#endif

    m_globalData = 0;
}
NEVER_INLINE CollectorBlock* Heap::allocateBlock()
{
#if OS(DARWIN)
    vm_address_t address = 0;
    vm_map(current_task(), &address, BLOCK_SIZE, BLOCK_OFFSET_MASK, VM_FLAGS_ANYWHERE | VM_TAG_FOR_COLLECTOR_MEMORY, MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
#elif OS(SYMBIAN)
    // Allocate a 64 KB aligned CollectorBlock.
    unsigned char* mask = reinterpret_cast<unsigned char*>(userChunk->Alloc(BLOCK_SIZE));
    uintptr_t address = reinterpret_cast<uintptr_t>(mask);
#elif OS(WINCE)
    void* address = VirtualAlloc(NULL, BLOCK_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#elif OS(WINDOWS)
#if COMPILER(MINGW)
    void* address = __mingw_aligned_malloc(BLOCK_SIZE, BLOCK_SIZE);
#else
    void* address = _aligned_malloc(BLOCK_SIZE, BLOCK_SIZE);
#endif
    memset(address, 0, BLOCK_SIZE);
#elif HAVE(POSIX_MEMALIGN)
    void* address;
    posix_memalign(&address, BLOCK_SIZE, BLOCK_SIZE);
#else

#if ENABLE(JSC_MULTIPLE_THREADS)
#error Need to initialize pagesize safely.
#else
    static size_t pagesize = getpagesize();
#endif

    size_t extra = 0;
    if (BLOCK_SIZE > pagesize)
        extra = BLOCK_SIZE - pagesize;

    void* mmapResult = mmap(NULL, BLOCK_SIZE + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
    uintptr_t address = reinterpret_cast<uintptr_t>(mmapResult);

    size_t adjust = 0;
    if ((address & BLOCK_OFFSET_MASK) != 0)
        adjust = BLOCK_SIZE - (address & BLOCK_OFFSET_MASK);

    if (adjust > 0)
        munmap(reinterpret_cast<char*>(address), adjust);

    if (adjust < extra)
        munmap(reinterpret_cast<char*>(address + adjust + BLOCK_SIZE), extra - adjust);

    address += adjust;
#endif
    CollectorBlock* block = reinterpret_cast<CollectorBlock*>(address);

    clearMarkBits(block);

    Structure* dummyMarkableCellStructure = m_globalData->dummyMarkableCellStructure.get();
    for (size_t i = 0; i < HeapConstants::cellsPerBlock; ++i)
        new (block->cells + i) JSCell(dummyMarkableCellStructure);
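    // The placement-new loop above seeds every cell in the fresh block with a dummy
    // markable JSCell, so conservative marking and sweep() can safely treat any cell
    // in the block as a valid, destructible object.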
    // Add block to blocks vector.

    size_t numBlocks = m_heap.numBlocks;
    if (m_heap.usedBlocks == numBlocks) {
        static const size_t maxNumBlocks = ULONG_MAX / sizeof(CollectorBlock*) / GROWTH_FACTOR;
        if (numBlocks > maxNumBlocks)
            CRASH();
        numBlocks = max(MIN_ARRAY_SIZE, numBlocks * GROWTH_FACTOR);
        m_heap.numBlocks = numBlocks;
        m_heap.blocks = static_cast<CollectorBlock**>(fastRealloc(m_heap.blocks, numBlocks * sizeof(CollectorBlock*)));
    }
    m_heap.blocks[m_heap.usedBlocks++] = block;

    return block;
}
NEVER_INLINE void Heap::freeBlock(size_t block)
{
    m_heap.didShrink = true;

    ObjectIterator it(m_heap, block);
    ObjectIterator end(m_heap, block + 1);
    for ( ; it != end; ++it)
        (*it)->~JSCell();
    freeBlockPtr(m_heap.blocks[block]);

    // swap with the last block so we compact as we go
    m_heap.blocks[block] = m_heap.blocks[m_heap.usedBlocks - 1];
    m_heap.usedBlocks--;

    if (m_heap.numBlocks > MIN_ARRAY_SIZE && m_heap.usedBlocks < m_heap.numBlocks / LOW_WATER_FACTOR) {
        m_heap.numBlocks = m_heap.numBlocks / GROWTH_FACTOR;
        m_heap.blocks = static_cast<CollectorBlock**>(fastRealloc(m_heap.blocks, m_heap.numBlocks * sizeof(CollectorBlock*)));
    }
}
NEVER_INLINE void Heap::freeBlockPtr(CollectorBlock* block)
{
#if OS(DARWIN)
    vm_deallocate(current_task(), reinterpret_cast<vm_address_t>(block), BLOCK_SIZE);
#elif OS(SYMBIAN)
    userChunk->Free(reinterpret_cast<TAny*>(block));
#elif OS(WINCE)
    VirtualFree(block, 0, MEM_RELEASE);
#elif OS(WINDOWS)
#if COMPILER(MINGW)
    __mingw_aligned_free(block);
#else
    _aligned_free(block);
#endif
#elif HAVE(POSIX_MEMALIGN)
    free(block);
#else
    munmap(reinterpret_cast<char*>(block), BLOCK_SIZE);
#endif
}
void Heap::freeBlocks()
{
    ProtectCountSet protectedValuesCopy = m_protectedValues;

    clearMarkBits();
    ProtectCountSet::iterator protectedValuesEnd = protectedValuesCopy.end();
    for (ProtectCountSet::iterator it = protectedValuesCopy.begin(); it != protectedValuesEnd; ++it)
        markCell(it->first);

    m_heap.nextCell = 0;
    m_heap.nextBlock = 0;
    DeadObjectIterator it(m_heap, m_heap.nextBlock, m_heap.nextCell);
    DeadObjectIterator end(m_heap, m_heap.usedBlocks);
    for ( ; it != end; ++it)
        (*it)->~JSCell();

    ASSERT(!protectedObjectCount());

    protectedValuesEnd = protectedValuesCopy.end();
    for (ProtectCountSet::iterator it = protectedValuesCopy.begin(); it != protectedValuesEnd; ++it)
        it->first->~JSCell();

    for (size_t block = 0; block < m_heap.usedBlocks; ++block)
        freeBlockPtr(m_heap.blocks[block]);

    fastFree(m_heap.blocks);

    memset(&m_heap, 0, sizeof(CollectorHeap));
}
void Heap::recordExtraCost(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short-lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.

    if (m_heap.extraCost > maxExtraCost && m_heap.extraCost > m_heap.usedBlocks * BLOCK_SIZE / 2) {
        // If the last iteration through the heap deallocated blocks, we need
        // to clean up remaining garbage before marking. Otherwise, the conservative
        // marking mechanism might follow a pointer to unmapped memory.
        if (m_heap.didShrink)
            sweep();
    }

    m_heap.extraCost += cost;
}
void* Heap::allocate(size_t s)
{
    typedef HeapConstants::Block Block;
    typedef HeapConstants::Cell Cell;

    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());
    ASSERT_UNUSED(s, s <= HeapConstants::cellSize);

    ASSERT(m_heap.operationInProgress == NoOperation);

#if COLLECT_ON_EVERY_ALLOCATION
    collectAllGarbage();
    ASSERT(m_heap.operationInProgress == NoOperation);
#endif

allocate:

    // Fast case: find the next garbage cell and recycle it.

    do {
        ASSERT(m_heap.nextBlock < m_heap.usedBlocks);
        Block* block = reinterpret_cast<Block*>(m_heap.blocks[m_heap.nextBlock]);
        do {
            ASSERT(m_heap.nextCell < HeapConstants::cellsPerBlock);
            if (!block->marked.get(m_heap.nextCell)) { // Always false for the last cell in the block
                Cell* cell = block->cells + m_heap.nextCell;

                m_heap.operationInProgress = Allocation;
                JSCell* imp = reinterpret_cast<JSCell*>(cell);
                imp->~JSCell();
                m_heap.operationInProgress = NoOperation;

                ++m_heap.nextCell;
                return cell;
            }
        } while (++m_heap.nextCell != HeapConstants::cellsPerBlock);
        m_heap.nextCell = 0;
    } while (++m_heap.nextBlock != m_heap.usedBlocks);

    // Slow case: reached the end of the heap. Mark live objects and start over.

    reset();
    goto allocate;
}
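// allocate() above is a lazy-sweep allocator: mark bits left over from the last
// collection double as a free map, so the fast path simply scans forward for an
// unmarked cell, destroys the garbage object that previously lived there, and hands
// the slot out again; only when the scan exhausts every block does it fall back to
// a full mark-and-reset.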
void Heap::resizeBlocks()
{
    m_heap.didShrink = false;

    size_t usedCellCount = markedCells();
    size_t minCellCount = usedCellCount + max(ALLOCATIONS_PER_COLLECTION, usedCellCount);
    size_t minBlockCount = (minCellCount + HeapConstants::cellsPerBlock - 1) / HeapConstants::cellsPerBlock;

    size_t maxCellCount = 1.25f * minCellCount;
    size_t maxBlockCount = (maxCellCount + HeapConstants::cellsPerBlock - 1) / HeapConstants::cellsPerBlock;

    if (m_heap.usedBlocks < minBlockCount)
        growBlocks(minBlockCount);
    else if (m_heap.usedBlocks > maxBlockCount)
        shrinkBlocks(maxBlockCount);
}
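// resizeBlocks() keeps the heap between two bounds derived from the live cell count:
// at least ALLOCATIONS_PER_COLLECTION cells (or 100% of the marked cells, whichever
// is larger) of headroom above the marked cells, and at most 25% slack beyond that
// minimum, growing or shrinking the block list to stay inside that window.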
void Heap::growBlocks(size_t neededBlocks)
{
    ASSERT(m_heap.usedBlocks < neededBlocks);
    while (m_heap.usedBlocks < neededBlocks)
        allocateBlock();
}
void Heap::shrinkBlocks(size_t neededBlocks)
{
    ASSERT(m_heap.usedBlocks > neededBlocks);

    // Clear the always-on last bit, so isEmpty() isn't fooled by it.
    for (size_t i = 0; i < m_heap.usedBlocks; ++i)
        m_heap.blocks[i]->marked.clear(HeapConstants::cellsPerBlock - 1);

    for (size_t i = 0; i != m_heap.usedBlocks && m_heap.usedBlocks != neededBlocks; ) {
        if (m_heap.blocks[i]->marked.isEmpty())
            freeBlock(i);
        else
            ++i;
    }

    // Reset the always-on last bit.
    for (size_t i = 0; i < m_heap.usedBlocks; ++i)
        m_heap.blocks[i]->marked.set(HeapConstants::cellsPerBlock - 1);
}
#if OS(WINCE)
void* g_stackBase = 0;

inline bool isPageWritable(void* page)
{
    MEMORY_BASIC_INFORMATION memoryInformation;
    DWORD result = VirtualQuery(page, &memoryInformation, sizeof(memoryInformation));

    // return false on error, including ptr outside memory
    if (result != sizeof(memoryInformation))
        return false;

    DWORD protect = memoryInformation.Protect & ~(PAGE_GUARD | PAGE_NOCACHE);
    return protect == PAGE_READWRITE
        || protect == PAGE_WRITECOPY
        || protect == PAGE_EXECUTE_READWRITE
        || protect == PAGE_EXECUTE_WRITECOPY;
}
static void* getStackBase(void* previousFrame)
{
    // find the address of this stack frame by taking the address of a local variable
    bool isGrowingDownward;
    void* thisFrame = (void*)(&isGrowingDownward);

    isGrowingDownward = previousFrame < &thisFrame;
    static DWORD pageSize = 0;
    if (!pageSize) {
        SYSTEM_INFO systemInfo;
        GetSystemInfo(&systemInfo);
        pageSize = systemInfo.dwPageSize;
    }

    // scan all of memory starting from this frame, and return the last writable page found
    register char* currentPage = (char*)((DWORD)thisFrame & ~(pageSize - 1));
    if (isGrowingDownward) {
        while (currentPage > 0) {
            // check for underflow
            if (currentPage >= (char*)pageSize)
                currentPage -= pageSize;
            else
                currentPage = 0;
            if (!isPageWritable(currentPage))
                return currentPage + pageSize;
        }
        return 0;
    }

    while (true) {
        // guaranteed to complete because isPageWritable returns false at end of memory
        currentPage += pageSize;
        if (!isPageWritable(currentPage))
            return currentPage;
    }
}
#endif // OS(WINCE)
#if OS(QNX)
static inline void* currentThreadStackBaseQNX()
{
    static void* stackBase = 0;
    static size_t stackSize = 0;
    static pthread_t stackThread;
    pthread_t thread = pthread_self();
    if (stackBase == 0 || thread != stackThread) {
        struct _debug_thread_info threadInfo;
        memset(&threadInfo, 0, sizeof(threadInfo));
        threadInfo.tid = pthread_self();
        int fd = open("/proc/self", O_RDONLY);
        if (fd == -1) {
            LOG_ERROR("Unable to open /proc/self (errno: %d)", errno);
            return 0;
        }
        devctl(fd, DCMD_PROC_TIDSTATUS, &threadInfo, sizeof(threadInfo), 0);
        close(fd);
        stackBase = reinterpret_cast<void*>(threadInfo.stkbase);
        stackSize = threadInfo.stksize;
        stackThread = thread;
    }
    return static_cast<char*>(stackBase) + stackSize;
}
#endif
static inline void* currentThreadStackBase()
{
#if OS(DARWIN)
    pthread_t thread = pthread_self();
    return pthread_get_stackaddr_np(thread);
#elif OS(WINDOWS) && CPU(X86) && COMPILER(MSVC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    NT_TIB* pTib;
    __asm {
        MOV EAX, FS:[18h]
        MOV pTib, EAX
    }
    return static_cast<void*>(pTib->StackBase);
#elif OS(WINDOWS) && CPU(X86_64) && COMPILER(MSVC)
    // FIXME: why only for MSVC?
    PNT_TIB64 pTib = reinterpret_cast<PNT_TIB64>(NtCurrentTeb());
    return reinterpret_cast<void*>(pTib->StackBase);
#elif OS(WINDOWS) && CPU(X86) && COMPILER(GCC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    NT_TIB* pTib;
    asm ( "movl %%fs:0x18, %0\n"
          : "=r" (pTib)
        );
    return static_cast<void*>(pTib->StackBase);
#elif OS(QNX)
    return currentThreadStackBaseQNX();
#elif OS(OPENBSD)
    pthread_t thread = pthread_self();
    stack_t stack;
    pthread_stackseg_np(thread, &stack);
    return stack.ss_sp;
#elif OS(SYMBIAN)
    static void* stackBase = 0;
    if (stackBase == 0) {
        TThreadStackInfo info;
        RThread thread;
        thread.StackInfo(info);
        stackBase = (void*)info.iBase;
    }
    return (void*)stackBase;
#elif OS(HAIKU)
    thread_info threadInfo;
    get_thread_info(find_thread(NULL), &threadInfo);
    return threadInfo.stack_end;
#elif OS(UNIX)
    static void* stackBase = 0;
    static size_t stackSize = 0;
    static pthread_t stackThread;
    pthread_t thread = pthread_self();
    if (stackBase == 0 || thread != stackThread) {
        pthread_attr_t sattr;
        pthread_attr_init(&sattr);
#if HAVE(PTHREAD_NP_H) || OS(NETBSD)
        // e.g. on FreeBSD 5.4, neundorf@kde.org
        pthread_attr_get_np(thread, &sattr);
#else
        // FIXME: this function is non-portable; other POSIX systems may have different np alternatives
        pthread_getattr_np(thread, &sattr);
#endif
        int rc = pthread_attr_getstack(&sattr, &stackBase, &stackSize);
        (void)rc; // FIXME: Deal with error code somehow? Seems fatal.
        pthread_attr_destroy(&sattr);
        stackThread = thread;
    }
    return static_cast<char*>(stackBase) + stackSize;
#elif OS(WINCE)
    if (g_stackBase)
        return g_stackBase;
    int dummy;
    return getStackBase(&dummy);
#else
#error Need a way to get the stack base on this platform
#endif
}
#if ENABLE(JSC_MULTIPLE_THREADS)

static inline PlatformThread getCurrentPlatformThread()
{
#if OS(DARWIN)
    return pthread_mach_thread_np(pthread_self());
#elif OS(WINDOWS)
    return pthread_getw32threadhandle_np(pthread_self());
#endif
}
void Heap::makeUsableFromMultipleThreads()
{
    if (m_currentThreadRegistrar)
        return;

    int error = pthread_key_create(&m_currentThreadRegistrar, unregisterThread);
    if (error)
        CRASH();
}
void Heap::registerThread()
{
    ASSERT(!m_globalData->mainThreadOnly || isMainThread() || pthread_main_np());

    if (!m_currentThreadRegistrar || pthread_getspecific(m_currentThreadRegistrar))
        return;

    pthread_setspecific(m_currentThreadRegistrar, this);
    Heap::Thread* thread = new Heap::Thread(pthread_self(), getCurrentPlatformThread(), currentThreadStackBase());

    MutexLocker lock(m_registeredThreadsMutex);

    thread->next = m_registeredThreads;
    m_registeredThreads = thread;
}
void Heap::unregisterThread(void* p)
{
    static_cast<Heap*>(p)->unregisterThread();
}

void Heap::unregisterThread()
{
    pthread_t currentPosixThread = pthread_self();

    MutexLocker lock(m_registeredThreadsMutex);

    if (pthread_equal(currentPosixThread, m_registeredThreads->posixThread)) {
        Thread* t = m_registeredThreads;
        m_registeredThreads = m_registeredThreads->next;
        delete t;
    } else {
        Heap::Thread* last = m_registeredThreads;
        Heap::Thread* t;
        for (t = m_registeredThreads->next; t; t = t->next) {
            if (pthread_equal(t->posixThread, currentPosixThread)) {
                last->next = t->next;
                break;
            }
            last = t;
        }
        ASSERT(t); // If t is NULL, we never found ourselves in the list.
        delete t;
    }
}
#else // ENABLE(JSC_MULTIPLE_THREADS)

void Heap::registerThread()
{
}

#endif

inline bool isPointerAligned(void* p)
{
    return (((intptr_t)(p) & (sizeof(char*) - 1)) == 0);
}
// Cell size needs to be a power of two for isPossibleCell to be valid.
COMPILE_ASSERT(sizeof(CollectorCell) % 2 == 0, Collector_cell_size_is_power_of_two);

#if USE(JSVALUE32)
static bool isHalfCellAligned(void* p)
{
    return (((intptr_t)(p) & (CELL_MASK >> 1)) == 0);
}

static inline bool isPossibleCell(void* p)
{
    return isHalfCellAligned(p) && p;
}

#else

static inline bool isCellAligned(void* p)
{
    return (((intptr_t)(p) & CELL_MASK) == 0);
}

static inline bool isPossibleCell(void* p)
{
    return isCellAligned(p) && p;
}

#endif // USE(JSVALUE32)
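// These predicates are the cheap first-level filter for conservative marking:
// markConservatively() below only treats a word as a potential JSCell* if it is
// non-null and has the alignment a cell (or, with JSVALUE32, half a cell) would
// have inside a CollectorBlock.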
void Heap::markConservatively(MarkStack& markStack, void* start, void* end)
{
    ASSERT((static_cast<char*>(end) - static_cast<char*>(start)) < 0x1000000);
    ASSERT(isPointerAligned(start));
    ASSERT(isPointerAligned(end));

    char** p = static_cast<char**>(start);
    char** e = static_cast<char**>(end);

    CollectorBlock** blocks = m_heap.blocks;
    while (p != e) {
        char* x = *p++;
        if (isPossibleCell(x)) {
            size_t usedBlocks;
            uintptr_t xAsBits = reinterpret_cast<uintptr_t>(x);
            xAsBits &= CELL_ALIGN_MASK;

            uintptr_t offset = xAsBits & BLOCK_OFFSET_MASK;
            const size_t lastCellOffset = sizeof(CollectorCell) * (CELLS_PER_BLOCK - 1);
            if (offset > lastCellOffset)
                continue;

            CollectorBlock* blockAddr = reinterpret_cast<CollectorBlock*>(xAsBits - offset);
            usedBlocks = m_heap.usedBlocks;
            for (size_t block = 0; block < usedBlocks; block++) {
                if (blocks[block] != blockAddr)
                    continue;
                markStack.append(reinterpret_cast<JSCell*>(xAsBits));
            }
        }
    }
}
void NEVER_INLINE Heap::markCurrentThreadConservativelyInternal(MarkStack& markStack)
{
    void* dummy;
    void* stackPointer = &dummy;
    void* stackBase = currentThreadStackBase();
    markConservatively(markStack, stackPointer, stackBase);
}
#if COMPILER(GCC)
#define REGISTER_BUFFER_ALIGNMENT __attribute__ ((aligned (sizeof(void*))))
#else
#define REGISTER_BUFFER_ALIGNMENT
#endif

void Heap::markCurrentThreadConservatively(MarkStack& markStack)
{
    // setjmp forces volatile registers onto the stack
    jmp_buf registers REGISTER_BUFFER_ALIGNMENT;
#if COMPILER(MSVC)
#pragma warning(push)
#pragma warning(disable: 4611)
#endif
    setjmp(registers);
#if COMPILER(MSVC)
#pragma warning(pop)
#endif

    markCurrentThreadConservativelyInternal(markStack);
}
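// The setjmp() in markCurrentThreadConservatively() above spills volatile and
// callee-saved registers into the stack-allocated jmp_buf, so the conservative stack
// walk in the internal helper also sees cell pointers that currently live only in
// registers.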
#if ENABLE(JSC_MULTIPLE_THREADS)

static inline void suspendThread(const PlatformThread& platformThread)
{
#if OS(DARWIN)
    thread_suspend(platformThread);
#elif OS(WINDOWS)
    SuspendThread(platformThread);
#else
#error Need a way to suspend threads on this platform
#endif
}

static inline void resumeThread(const PlatformThread& platformThread)
{
#if OS(DARWIN)
    thread_resume(platformThread);
#elif OS(WINDOWS)
    ResumeThread(platformThread);
#else
#error Need a way to resume threads on this platform
#endif
}
typedef unsigned long usword_t; // word size, assumed to be either 32 or 64 bit

#if OS(DARWIN)

#if CPU(X86)
typedef i386_thread_state_t PlatformThreadRegisters;
#elif CPU(X86_64)
typedef x86_thread_state64_t PlatformThreadRegisters;
#elif CPU(PPC)
typedef ppc_thread_state_t PlatformThreadRegisters;
#elif CPU(PPC64)
typedef ppc_thread_state64_t PlatformThreadRegisters;
#elif CPU(ARM)
typedef arm_thread_state_t PlatformThreadRegisters;
#else
#error Unknown Architecture
#endif

#elif OS(WINDOWS) && CPU(X86)
typedef CONTEXT PlatformThreadRegisters;
#else
#error Need a thread register struct for this platform
#endif
static size_t getPlatformThreadRegisters(const PlatformThread& platformThread, PlatformThreadRegisters& regs)
{
#if OS(DARWIN)

#if CPU(X86)
    unsigned user_count = sizeof(regs)/sizeof(int);
    thread_state_flavor_t flavor = i386_THREAD_STATE;
#elif CPU(X86_64)
    unsigned user_count = x86_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = x86_THREAD_STATE64;
#elif CPU(PPC)
    unsigned user_count = PPC_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE;
#elif CPU(PPC64)
    unsigned user_count = PPC_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE64;
#elif CPU(ARM)
    unsigned user_count = ARM_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = ARM_THREAD_STATE;
#else
#error Unknown Architecture
#endif

    kern_return_t result = thread_get_state(platformThread, flavor, (thread_state_t)&regs, &user_count);
    if (result != KERN_SUCCESS) {
        WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION,
                            "JavaScript garbage collection failed because thread_get_state returned an error (%d). This is probably the result of running inside Rosetta, which is not supported.", result);
        CRASH();
    }
    return user_count * sizeof(usword_t);

#elif OS(WINDOWS) && CPU(X86)
    regs.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL | CONTEXT_SEGMENTS;
    GetThreadContext(platformThread, &regs);
    return sizeof(CONTEXT);
#else
#error Need a way to get thread registers on this platform
#endif
}
static inline void* otherThreadStackPointer(const PlatformThreadRegisters& regs)
{
#if OS(DARWIN)

#if __DARWIN_UNIX03

#if CPU(X86)
    return reinterpret_cast<void*>(regs.__esp);
#elif CPU(X86_64)
    return reinterpret_cast<void*>(regs.__rsp);
#elif CPU(PPC) || CPU(PPC64)
    return reinterpret_cast<void*>(regs.__r1);
#elif CPU(ARM)
    return reinterpret_cast<void*>(regs.__sp);
#else
#error Unknown Architecture
#endif

#else // !__DARWIN_UNIX03

#if CPU(X86)
    return reinterpret_cast<void*>(regs.esp);
#elif CPU(X86_64)
    return reinterpret_cast<void*>(regs.rsp);
#elif CPU(PPC) || CPU(PPC64)
    return reinterpret_cast<void*>(regs.r1);
#else
#error Unknown Architecture
#endif

#endif // __DARWIN_UNIX03

#elif CPU(X86) && OS(WINDOWS)
    return reinterpret_cast<void*>((uintptr_t) regs.Esp);
#else
#error Need a way to get the stack pointer for another thread on this platform
#endif
}
void Heap::markOtherThreadConservatively(MarkStack& markStack, Thread* thread)
{
    suspendThread(thread->platformThread);

    PlatformThreadRegisters regs;
    size_t regSize = getPlatformThreadRegisters(thread->platformThread, regs);

    // mark the thread's registers
    markConservatively(markStack, static_cast<void*>(&regs), static_cast<void*>(reinterpret_cast<char*>(&regs) + regSize));

    void* stackPointer = otherThreadStackPointer(regs);
    markConservatively(markStack, stackPointer, thread->stackBase);

    resumeThread(thread->platformThread);
}
void Heap::markStackObjectsConservatively(MarkStack& markStack)
{
    markCurrentThreadConservatively(markStack);

#if ENABLE(JSC_MULTIPLE_THREADS)

    if (m_currentThreadRegistrar) {

        MutexLocker lock(m_registeredThreadsMutex);

#ifndef NDEBUG
        // Forbid malloc during the mark phase. Marking a thread suspends it, so
        // a malloc inside markChildren() would risk a deadlock with a thread that had been
        // suspended while holding the malloc lock.
        fastMallocForbid();
#endif
        // It is safe to access the registeredThreads list, because we earlier asserted that locks are being held,
        // and since this is a shared heap, they are real locks.
        for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
            if (!pthread_equal(thread->posixThread, pthread_self()))
                markOtherThreadConservatively(markStack, thread);
        }

#ifndef NDEBUG
        fastMallocAllow();
#endif
    }
#endif
}
void Heap::protect(JSValue k)
{
    ASSERT(k);
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance);

    if (!k.isCell())
        return;

    m_protectedValues.add(k.asCell());
}

void Heap::unprotect(JSValue k)
{
    ASSERT(k);
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance);

    if (!k.isCell())
        return;

    m_protectedValues.remove(k.asCell());
}
void Heap::markProtectedObjects(MarkStack& markStack)
{
    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it) {
        markStack.append(it->first);
        markStack.drain();
    }
}
void Heap::clearMarkBits()
{
    for (size_t i = 0; i < m_heap.usedBlocks; ++i)
        clearMarkBits(m_heap.blocks[i]);
}

void Heap::clearMarkBits(CollectorBlock* block)
{
    // allocate assumes that the last cell in every block is marked.
    block->marked.clearAll();
    block->marked.set(HeapConstants::cellsPerBlock - 1);
}
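// Keeping the last cell of every block permanently marked gives allocate() a
// sentinel: its inner loop can stop on a marked cell without a separate bounds check,
// and objectCount() compensates by subtracting one cell per used block.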
size_t Heap::markedCells(size_t startBlock, size_t startCell) const
{
    ASSERT(startBlock <= m_heap.usedBlocks);
    ASSERT(startCell < HeapConstants::cellsPerBlock);

    if (startBlock >= m_heap.usedBlocks)
        return 0;

    size_t result = 0;
    result += m_heap.blocks[startBlock]->marked.count(startCell);
    for (size_t i = startBlock + 1; i < m_heap.usedBlocks; ++i)
        result += m_heap.blocks[i]->marked.count();

    return result;
}
void Heap::sweep()
{
    ASSERT(m_heap.operationInProgress == NoOperation);
    if (m_heap.operationInProgress != NoOperation)
        CRASH();
    m_heap.operationInProgress = Collection;

#if !ENABLE(JSC_ZOMBIES)
    Structure* dummyMarkableCellStructure = m_globalData->dummyMarkableCellStructure.get();
#endif

    DeadObjectIterator it(m_heap, m_heap.nextBlock, m_heap.nextCell);
    DeadObjectIterator end(m_heap, m_heap.usedBlocks);
    for ( ; it != end; ++it) {
        JSCell* cell = *it;
#if ENABLE(JSC_ZOMBIES)
        if (!cell->isZombie()) {
            const ClassInfo* info = cell->classInfo();
            cell->~JSCell();
            new (cell) JSZombie(info, JSZombie::leakedZombieStructure());
            Heap::markCell(cell);
        }
#else
        cell->~JSCell();
        // Callers of sweep assume it's safe to mark any cell in the heap.
        new (cell) JSCell(dummyMarkableCellStructure);
#endif
    }

    m_heap.operationInProgress = NoOperation;
}
void Heap::markRoots()
{
    if (m_globalData->isSharedInstance) {
        ASSERT(JSLock::lockCount() > 0);
        ASSERT(JSLock::currentThreadIsHoldingLock());
    }

    ASSERT(m_heap.operationInProgress == NoOperation);
    if (m_heap.operationInProgress != NoOperation)
        CRASH();

    m_heap.operationInProgress = Collection;

    MarkStack& markStack = m_globalData->markStack;

    clearMarkBits();

    // Mark stack roots.
    markStackObjectsConservatively(markStack);
    m_globalData->interpreter->registerFile().markCallFrames(markStack, this);

    // Mark explicitly registered roots.
    markProtectedObjects(markStack);

    // Mark misc. other roots.
    if (m_markListSet && m_markListSet->size())
        MarkedArgumentBuffer::markLists(markStack, *m_markListSet);
    if (m_globalData->exception)
        markStack.append(m_globalData->exception);
    if (m_globalData->functionCodeBlockBeingReparsed)
        m_globalData->functionCodeBlockBeingReparsed->markAggregate(markStack);
    if (m_globalData->firstStringifierToMark)
        JSONObject::markStringifiers(markStack, m_globalData->firstStringifierToMark);

    // Mark the small strings cache last, since it will clear itself if nothing
    // else has marked it.
    m_globalData->smallStrings.markChildren(markStack);

    markStack.drain();
    markStack.compact();

    m_heap.operationInProgress = NoOperation;
}
size_t Heap::objectCount() const
{
    return m_heap.nextBlock * HeapConstants::cellsPerBlock // allocated full blocks
           + m_heap.nextCell // allocated cells in current block
           + markedCells(m_heap.nextBlock, m_heap.nextCell) // marked cells in remainder of m_heap
           - m_heap.usedBlocks; // 1 cell per block is a dummy sentinel
}
void Heap::addToStatistics(Heap::Statistics& statistics) const
{
    statistics.size += m_heap.usedBlocks * BLOCK_SIZE;
    statistics.free += m_heap.usedBlocks * BLOCK_SIZE - (objectCount() * HeapConstants::cellSize);
}

Heap::Statistics Heap::statistics() const
{
    Statistics statistics = { 0, 0 };
    addToStatistics(statistics);
    return statistics;
}
size_t Heap::globalObjectCount()
{
    size_t count = 0;
    if (JSGlobalObject* head = m_globalData->head) {
        JSGlobalObject* o = head;
        do {
            ++count;
            o = o->next();
        } while (o != head);
    }
    return count;
}

size_t Heap::protectedGlobalObjectCount()
{
    size_t count = 0;
    if (JSGlobalObject* head = m_globalData->head) {
        JSGlobalObject* o = head;
        do {
            if (m_protectedValues.contains(o))
                ++count;
            o = o->next();
        } while (o != head);
    }
    return count;
}
size_t Heap::protectedObjectCount()
{
    return m_protectedValues.size();
}
static const char* typeName(JSCell* cell)
{
    if (cell->isString())
        return "string";
#if USE(JSVALUE32)
    if (cell->isNumber())
        return "number";
#endif
    if (cell->isGetterSetter())
        return "gettersetter";
    if (cell->isAPIValueWrapper())
        return "value wrapper";
    if (cell->isPropertyNameIterator())
        return "for-in iterator";
    ASSERT(cell->isObject());
    const ClassInfo* info = cell->classInfo();
    return info ? info->className : "Object";
}
HashCountedSet<const char*>* Heap::protectedObjectTypeCounts()
{
    HashCountedSet<const char*>* counts = new HashCountedSet<const char*>;

    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
        counts->add(typeName(it->first));

    return counts;
}
bool Heap::isBusy()
{
    return m_heap.operationInProgress != NoOperation;
}
void Heap::reset()
{
    JAVASCRIPTCORE_GC_BEGIN();

    markRoots();

    JAVASCRIPTCORE_GC_MARKED();

    m_heap.nextCell = 0;
    m_heap.nextBlock = 0;
    m_heap.nextNumber = 0;
    m_heap.extraCost = 0;
#if ENABLE(JSC_ZOMBIES)
    sweep();
#endif
    resizeBlocks();

    JAVASCRIPTCORE_GC_END();
}
void Heap::collectAllGarbage()
{
    JAVASCRIPTCORE_GC_BEGIN();

    // If the last iteration through the heap deallocated blocks, we need
    // to clean up remaining garbage before marking. Otherwise, the conservative
    // marking mechanism might follow a pointer to unmapped memory.
    if (m_heap.didShrink)
        sweep();

    markRoots();

    JAVASCRIPTCORE_GC_MARKED();

    m_heap.nextCell = 0;
    m_heap.nextBlock = 0;
    m_heap.nextNumber = 0;
    m_heap.extraCost = 0;
    sweep();
    resizeBlocks();

    JAVASCRIPTCORE_GC_END();
}
LiveObjectIterator Heap::primaryHeapBegin()
{
    return LiveObjectIterator(m_heap, 0);
}

LiveObjectIterator Heap::primaryHeapEnd()
{
    return LiveObjectIterator(m_heap, m_heap.usedBlocks);
}

} // namespace JSC