/*
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "Collector.h"

#include "CallFrame.h"
#include "CodeBlock.h"
#include "CollectorHeapIterator.h"
#include "Interpreter.h"
#include "JSGlobalObject.h"
#include "JSONObject.h"
#include "MarkStack.h"
#include <wtf/FastMalloc.h>
#include <wtf/HashCountedSet.h>
#include <wtf/UnusedParam.h>
#include <wtf/VMTags.h>

#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <mach/vm_map.h>

#if HAVE(PTHREAD_NP_H)
#include <pthread_np.h>
#endif

#include <sys/procfs.h>

#define COLLECT_ON_EVERY_ALLOCATION 0
const size_t GROWTH_FACTOR = 2;
const size_t LOW_WATER_FACTOR = 4;
const size_t ALLOCATIONS_PER_COLLECTION = 3600;
// This value has to be a macro to be used in max() without introducing
// a PIC branch in Mach-O binaries, see <rdar://problem/5971391>.
#define MIN_ARRAY_SIZE (static_cast<size_t>(14))
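// Tuning overview: GROWTH_FACTOR and LOW_WATER_FACTOR govern how the array of block pointers
// grows and shrinks (see allocateBlock() and freeBlock() below), while ALLOCATIONS_PER_COLLECTION
// feeds resizeBlocks(), which tries to keep roughly that many spare cells available before
// another collection becomes necessary.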
#if ENABLE(JSC_MULTIPLE_THREADS)

#if OS(DARWIN)
typedef mach_port_t PlatformThread;
#elif OS(WINDOWS)
typedef HANDLE PlatformThread;
#endif

class Heap::Thread {
public:
    Thread(pthread_t pthread, const PlatformThread& platThread, void* base)
        : posixThread(pthread)
        , platformThread(platThread)
        , stackBase(base)
    {
    }

    Thread* next;
    pthread_t posixThread;
    PlatformThread platformThread;
    void* stackBase;
};

#endif
Heap::Heap(JSGlobalData* globalData)
    : m_markListSet(0)
#if ENABLE(JSC_MULTIPLE_THREADS)
    , m_registeredThreads(0)
    , m_currentThreadRegistrar(0)
#endif
    , m_blockallocator(JSCCOLLECTOR_VIRTUALMEM_RESERVATION, BLOCK_SIZE)
    , m_globalData(globalData)
{
    memset(&m_heap, 0, sizeof(CollectorHeap));
}

Heap::~Heap()
{
    // The destroy function must already have been called, so assert this.
    ASSERT(!m_globalData);
}
void Heap::destroy()
{
    JSLock lock(SilenceAssertionsOnly);

    ASSERT(!m_globalData->dynamicGlobalObject);

    // The global object is not GC protected at this point, so sweeping may delete it
    // (and thus the global data) before other objects that may use the global data.
    RefPtr<JSGlobalData> protect(m_globalData);

    delete m_markListSet;
#if ENABLE(JSC_MULTIPLE_THREADS)
    if (m_currentThreadRegistrar) {
        int error = pthread_key_delete(m_currentThreadRegistrar);
        ASSERT_UNUSED(error, !error);
    }

    MutexLocker registeredThreadsLock(m_registeredThreadsMutex);
    for (Heap::Thread* t = m_registeredThreads; t;) {
        Heap::Thread* next = t->next;
        delete t;
        t = next;
    }
#endif

    m_blockallocator.destroy();

    m_globalData = 0;
}
NEVER_INLINE CollectorBlock* Heap::allocateBlock()
{
#if OS(DARWIN)
    vm_address_t address = 0;
    vm_map(current_task(), &address, BLOCK_SIZE, BLOCK_OFFSET_MASK, VM_FLAGS_ANYWHERE | VM_TAG_FOR_COLLECTOR_MEMORY, MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
#elif OS(SYMBIAN)
    void* address = m_blockallocator.alloc();
#elif OS(WINCE)
    void* address = VirtualAlloc(NULL, BLOCK_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#elif OS(WINDOWS)
#if COMPILER(MINGW) && !COMPILER(MINGW64)
    void* address = __mingw_aligned_malloc(BLOCK_SIZE, BLOCK_SIZE);
#else
    void* address = _aligned_malloc(BLOCK_SIZE, BLOCK_SIZE);
#endif
    memset(address, 0, BLOCK_SIZE);
#elif HAVE(POSIX_MEMALIGN)
    void* address;
    posix_memalign(&address, BLOCK_SIZE, BLOCK_SIZE);
#else

#if ENABLE(JSC_MULTIPLE_THREADS)
#error Need to initialize pagesize safely.
#else
    static size_t pagesize = getpagesize();
#endif

    size_t extra = 0;
    if (BLOCK_SIZE > pagesize)
        extra = BLOCK_SIZE - pagesize;

    void* mmapResult = mmap(NULL, BLOCK_SIZE + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
    uintptr_t address = reinterpret_cast<uintptr_t>(mmapResult);

    size_t adjust = 0;
    if ((address & BLOCK_OFFSET_MASK) != 0)
        adjust = BLOCK_SIZE - (address & BLOCK_OFFSET_MASK);

    if (adjust > 0)
        munmap(reinterpret_cast<char*>(address), adjust);

    if (extra > adjust)
        munmap(reinterpret_cast<char*>(address + adjust + BLOCK_SIZE), extra - adjust);

    address += adjust;
#endif
    CollectorBlock* block = reinterpret_cast<CollectorBlock*>(address);

    clearMarkBits(block);

    Structure* dummyMarkableCellStructure = m_globalData->dummyMarkableCellStructure.get();
    for (size_t i = 0; i < HeapConstants::cellsPerBlock; ++i)
        new (block->cells + i) JSCell(dummyMarkableCellStructure);
    // Add block to blocks vector.

    size_t numBlocks = m_heap.numBlocks;
    if (m_heap.usedBlocks == numBlocks) {
        static const size_t maxNumBlocks = ULONG_MAX / sizeof(CollectorBlock*) / GROWTH_FACTOR;
        if (numBlocks > maxNumBlocks)
            CRASH();
        numBlocks = max(MIN_ARRAY_SIZE, numBlocks * GROWTH_FACTOR);
        m_heap.numBlocks = numBlocks;
        m_heap.blocks = static_cast<CollectorBlock**>(fastRealloc(m_heap.blocks, numBlocks * sizeof(CollectorBlock*)));
    }
    m_heap.blocks[m_heap.usedBlocks++] = block;

    return block;
}
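
// Growth of the block pointer array in allocateBlock() is geometric: starting from MIN_ARRAY_SIZE (14)
// entries, each fastRealloc doubles the capacity (GROWTH_FACTOR == 2), e.g. 14 -> 28 -> 56, so the
// cost of repeatedly appending blocks stays amortized constant.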
NEVER_INLINE void Heap::freeBlock(size_t block)
{
    m_heap.didShrink = true;

    ObjectIterator it(m_heap, block);
    ObjectIterator end(m_heap, block + 1);
    for ( ; it != end; ++it)
        (*it)->~JSCell();
    freeBlockPtr(m_heap.blocks[block]);

    // swap with the last block so we compact as we go
    m_heap.blocks[block] = m_heap.blocks[m_heap.usedBlocks - 1];
    m_heap.usedBlocks--;

    if (m_heap.numBlocks > MIN_ARRAY_SIZE && m_heap.usedBlocks < m_heap.numBlocks / LOW_WATER_FACTOR) {
        m_heap.numBlocks = m_heap.numBlocks / GROWTH_FACTOR;
        m_heap.blocks = static_cast<CollectorBlock**>(fastRealloc(m_heap.blocks, m_heap.numBlocks * sizeof(CollectorBlock*)));
    }
}
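
// freeBlock() removes a block by overwriting its slot with the last entry, keeping m_heap.blocks dense
// without preserving order; the LOW_WATER_FACTOR test then shrinks the pointer array only once it is
// less than a quarter full, which adds some hysteresis against realloc churn.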
NEVER_INLINE void Heap::freeBlockPtr(CollectorBlock* block)
{
#if OS(DARWIN)
    vm_deallocate(current_task(), reinterpret_cast<vm_address_t>(block), BLOCK_SIZE);
#elif OS(SYMBIAN)
    m_blockallocator.free(reinterpret_cast<void*>(block));
#elif OS(WINCE)
    VirtualFree(block, 0, MEM_RELEASE);
#elif OS(WINDOWS)
#if COMPILER(MINGW) && !COMPILER(MINGW64)
    __mingw_aligned_free(block);
#else
    _aligned_free(block);
#endif
#elif HAVE(POSIX_MEMALIGN)
    free(block);
#else
    munmap(reinterpret_cast<char*>(block), BLOCK_SIZE);
#endif
}
void Heap::freeBlocks()
{
    ProtectCountSet protectedValuesCopy = m_protectedValues;

    clearMarkBits();
    ProtectCountSet::iterator protectedValuesEnd = protectedValuesCopy.end();
    for (ProtectCountSet::iterator it = protectedValuesCopy.begin(); it != protectedValuesEnd; ++it)
        markCell(it->first);

    m_heap.nextCell = 0;
    m_heap.nextBlock = 0;
    DeadObjectIterator it(m_heap, m_heap.nextBlock, m_heap.nextCell);
    DeadObjectIterator end(m_heap, m_heap.usedBlocks);
    for ( ; it != end; ++it)
        (*it)->~JSCell();

    ASSERT(!protectedObjectCount());

    protectedValuesEnd = protectedValuesCopy.end();
    for (ProtectCountSet::iterator it = protectedValuesCopy.begin(); it != protectedValuesEnd; ++it)
        it->first->~JSCell();

    for (size_t block = 0; block < m_heap.usedBlocks; ++block)
        freeBlockPtr(m_heap.blocks[block]);

    fastFree(m_heap.blocks);

    memset(&m_heap, 0, sizeof(CollectorHeap));
}
void Heap::recordExtraCost(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.

    if (m_heap.extraCost > maxExtraCost && m_heap.extraCost > m_heap.usedBlocks * BLOCK_SIZE / 2) {
        // If the last iteration through the heap deallocated blocks, we need
        // to clean up remaining garbage before marking. Otherwise, the conservative
        // marking mechanism might follow a pointer to unmapped memory.
        if (m_heap.didShrink)
            sweep();
    }

    m_heap.extraCost += cost;
}
void* Heap::allocate(size_t s)
{
    typedef HeapConstants::Block Block;
    typedef HeapConstants::Cell Cell;

    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());
    ASSERT_UNUSED(s, s <= HeapConstants::cellSize);

    ASSERT(m_heap.operationInProgress == NoOperation);

#if COLLECT_ON_EVERY_ALLOCATION
    collectAllGarbage();
    ASSERT(m_heap.operationInProgress == NoOperation);
#endif

    // Fast case: find the next garbage cell and recycle it.

    do {
        ASSERT(m_heap.nextBlock < m_heap.usedBlocks);
        Block* block = reinterpret_cast<Block*>(m_heap.blocks[m_heap.nextBlock]);
        do {
            ASSERT(m_heap.nextCell < HeapConstants::cellsPerBlock);
            if (!block->marked.get(m_heap.nextCell)) { // Always false for the last cell in the block
                Cell* cell = block->cells + m_heap.nextCell;

                m_heap.operationInProgress = Allocation;
                JSCell* imp = reinterpret_cast<JSCell*>(cell);
                imp->~JSCell();
                m_heap.operationInProgress = NoOperation;

                ++m_heap.nextCell;
                return cell;
            }
        } while (++m_heap.nextCell != HeapConstants::cellsPerBlock);
        m_heap.nextCell = 0;
    } while (++m_heap.nextBlock != m_heap.usedBlocks);

    // Slow case: reached the end of the heap. Mark live objects and start over.
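    // Allocation is therefore a lazy sweep: the nextBlock/nextCell cursor walks cells left unmarked
    // by the previous collection and recycles them in place, and only when every used block has been
    // exhausted does the collector mark live objects again (and, if needed, add blocks).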
void Heap::resizeBlocks()
{
    m_heap.didShrink = false;

    size_t usedCellCount = markedCells();
    size_t minCellCount = usedCellCount + max(ALLOCATIONS_PER_COLLECTION, usedCellCount);
    size_t minBlockCount = (minCellCount + HeapConstants::cellsPerBlock - 1) / HeapConstants::cellsPerBlock;

    size_t maxCellCount = 1.25f * minCellCount;
    size_t maxBlockCount = (maxCellCount + HeapConstants::cellsPerBlock - 1) / HeapConstants::cellsPerBlock;

    if (m_heap.usedBlocks < minBlockCount)
        growBlocks(minBlockCount);
    else if (m_heap.usedBlocks > maxBlockCount)
        shrinkBlocks(maxBlockCount);
}
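
// Worked example (numbers hypothetical): if markedCells() reports 10000 live cells, then
// minCellCount = 10000 + max(3600, 10000) = 20000 and maxCellCount = 25000, so the block count is
// only adjusted when capacity falls outside that band; the 25% slack keeps the heap from growing
// and shrinking on every collection.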
void Heap::growBlocks(size_t neededBlocks)
{
    ASSERT(m_heap.usedBlocks < neededBlocks);
    while (m_heap.usedBlocks < neededBlocks)
        allocateBlock();
}
void Heap::shrinkBlocks(size_t neededBlocks)
{
    ASSERT(m_heap.usedBlocks > neededBlocks);

    // Clear the always-on last bit, so isEmpty() isn't fooled by it.
    for (size_t i = 0; i < m_heap.usedBlocks; ++i)
        m_heap.blocks[i]->marked.clear(HeapConstants::cellsPerBlock - 1);

    for (size_t i = 0; i != m_heap.usedBlocks && m_heap.usedBlocks != neededBlocks; ) {
        if (m_heap.blocks[i]->marked.isEmpty())
            freeBlock(i);
        else
            ++i;
    }

    // Reset the always-on last bit.
    for (size_t i = 0; i < m_heap.usedBlocks; ++i)
        m_heap.blocks[i]->marked.set(HeapConstants::cellsPerBlock - 1);
}
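
// The last mark bit of every block is kept permanently set as a sentinel (see clearMarkBits() and the
// "Always false for the last cell in the block" note in allocate()), so it has to be masked off here
// before isEmpty() can tell whether a block still holds live cells, and restored afterwards so the
// allocator's assumption continues to hold.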
JS_EXPORTDATA void* g_stackBase = 0;

inline bool isPageWritable(void* page)
{
    MEMORY_BASIC_INFORMATION memoryInformation;
    DWORD result = VirtualQuery(page, &memoryInformation, sizeof(memoryInformation));

    // return false on error, including ptr outside memory
    if (result != sizeof(memoryInformation))
        return false;

    DWORD protect = memoryInformation.Protect & ~(PAGE_GUARD | PAGE_NOCACHE);
    return protect == PAGE_READWRITE
        || protect == PAGE_WRITECOPY
        || protect == PAGE_EXECUTE_READWRITE
        || protect == PAGE_EXECUTE_WRITECOPY;
}

static void* getStackBase(void* previousFrame)
{
    // find the address of this stack frame by taking the address of a local variable
    bool isGrowingDownward;
    void* thisFrame = (void*)(&isGrowingDownward);

    isGrowingDownward = previousFrame < &thisFrame;
    static DWORD pageSize = 0;
    if (!pageSize) {
        SYSTEM_INFO systemInfo;
        GetSystemInfo(&systemInfo);
        pageSize = systemInfo.dwPageSize;
    }

    // scan all of memory starting from this frame, and return the last writeable page found
    register char* currentPage = (char*)((DWORD)thisFrame & ~(pageSize - 1));
    if (isGrowingDownward) {
        while (currentPage > 0) {
            // check for underflow
            if (currentPage >= (char*)pageSize)
                currentPage -= pageSize;
            if (!isPageWritable(currentPage))
                return currentPage + pageSize;
        }
        return 0;
    }

    while (true) {
        // guaranteed to complete because isPageWritable returns false at end of memory
        currentPage += pageSize;
        if (!isPageWritable(currentPage))
            return currentPage;
    }
}
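
// Stack-base discovery on this platform is a probe: starting from the page containing the current
// frame, getStackBase() walks page by page (via isPageWritable/VirtualQuery) until the contiguous
// run of writable stack pages ends, and reports that boundary as the stack base.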
static inline void *currentThreadStackBaseQNX()
{
    static void* stackBase = 0;
    static size_t stackSize = 0;
    static pthread_t stackThread;
    pthread_t thread = pthread_self();
    if (stackBase == 0 || thread != stackThread) {
        struct _debug_thread_info threadInfo;
        memset(&threadInfo, 0, sizeof(threadInfo));
        threadInfo.tid = pthread_self();
        int fd = open("/proc/self", O_RDONLY);
        if (fd == -1) {
            LOG_ERROR("Unable to open /proc/self (errno: %d)", errno);
            return 0;
        }
        devctl(fd, DCMD_PROC_TIDSTATUS, &threadInfo, sizeof(threadInfo), 0);
        close(fd);
        stackBase = reinterpret_cast<void*>(threadInfo.stkbase);
        stackSize = threadInfo.stksize;
        stackThread = thread;
    }
    return static_cast<char*>(stackBase) + stackSize;
}
static inline void* currentThreadStackBase()
{
#if OS(DARWIN)
    pthread_t thread = pthread_self();
    return pthread_get_stackaddr_np(thread);
#elif OS(WINDOWS) && CPU(X86) && COMPILER(MSVC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    NT_TIB* pTib;
    __asm {
        MOV EAX, FS:[18h]
        MOV pTib, EAX
    }
    return static_cast<void*>(pTib->StackBase);
#elif OS(WINDOWS) && CPU(X86) && COMPILER(GCC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    NT_TIB* pTib;
    asm ( "movl %%fs:0x18, %0\n"
          : "=r" (pTib)
        );
    return static_cast<void*>(pTib->StackBase);
#elif OS(WINDOWS) && CPU(X86_64)
    PNT_TIB64 pTib = reinterpret_cast<PNT_TIB64>(NtCurrentTeb());
    return reinterpret_cast<void*>(pTib->StackBase);
#elif OS(QNX)
    AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
    MutexLocker locker(mutex);
    return currentThreadStackBaseQNX();
#elif OS(OPENBSD)
    pthread_t thread = pthread_self();
    stack_t stack;
    pthread_stackseg_np(thread, &stack);
    return stack.ss_sp;
#elif OS(SYMBIAN)
    TThreadStackInfo info;
    RThread thread;
    thread.StackInfo(info);
    return (void*)info.iBase;
#elif OS(HAIKU)
    thread_info threadInfo;
    get_thread_info(find_thread(NULL), &threadInfo);
    return threadInfo.stack_end;
#elif OS(UNIX)
    AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
    MutexLocker locker(mutex);
    static void* stackBase = 0;
    static size_t stackSize = 0;
    static pthread_t stackThread;
    pthread_t thread = pthread_self();
    if (stackBase == 0 || thread != stackThread) {
        pthread_attr_t sattr;
        pthread_attr_init(&sattr);
#if HAVE(PTHREAD_NP_H) || OS(NETBSD)
        // e.g. on FreeBSD 5.4, neundorf@kde.org
        pthread_attr_get_np(thread, &sattr);
#else
        // FIXME: this function is non-portable; other POSIX systems may have different np alternatives
        pthread_getattr_np(thread, &sattr);
#endif
        int rc = pthread_attr_getstack(&sattr, &stackBase, &stackSize);
        (void)rc; // FIXME: Deal with error code somehow? Seems fatal.
        pthread_attr_destroy(&sattr);
        stackThread = thread;
    }
    return static_cast<char*>(stackBase) + stackSize;
#elif OS(WINCE)
    AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
    MutexLocker locker(mutex);
    if (g_stackBase)
        return g_stackBase;
    int dummy;
    return getStackBase(&dummy);
#else
#error Need a way to get the stack base on this platform
#endif
}
#if ENABLE(JSC_MULTIPLE_THREADS)

static inline PlatformThread getCurrentPlatformThread()
{
#if OS(DARWIN)
    return pthread_mach_thread_np(pthread_self());
#elif OS(WINDOWS)
    return pthread_getw32threadhandle_np(pthread_self());
#endif
}

void Heap::makeUsableFromMultipleThreads()
{
    if (m_currentThreadRegistrar)
        return;

    int error = pthread_key_create(&m_currentThreadRegistrar, unregisterThread);
    if (error)
        CRASH();
}
void Heap::registerThread()
{
    ASSERT(!m_globalData->exclusiveThread || m_globalData->exclusiveThread == currentThread());

    if (!m_currentThreadRegistrar || pthread_getspecific(m_currentThreadRegistrar))
        return;

    pthread_setspecific(m_currentThreadRegistrar, this);
    Heap::Thread* thread = new Heap::Thread(pthread_self(), getCurrentPlatformThread(), currentThreadStackBase());

    MutexLocker lock(m_registeredThreadsMutex);

    thread->next = m_registeredThreads;
    m_registeredThreads = thread;
}

void Heap::unregisterThread(void* p)
{
    if (p)
        static_cast<Heap*>(p)->unregisterThread();
}

void Heap::unregisterThread()
{
    pthread_t currentPosixThread = pthread_self();

    MutexLocker lock(m_registeredThreadsMutex);

    if (pthread_equal(currentPosixThread, m_registeredThreads->posixThread)) {
        Thread* t = m_registeredThreads;
        m_registeredThreads = m_registeredThreads->next;
        delete t;
    } else {
        Heap::Thread* last = m_registeredThreads;
        Heap::Thread* t;
        for (t = m_registeredThreads->next; t; t = t->next) {
            if (pthread_equal(t->posixThread, currentPosixThread)) {
                last->next = t->next;
                break;
            }
            last = t;
        }
        ASSERT(t); // If t is NULL, we never found ourselves in the list.
        delete t;
    }
}

#else // ENABLE(JSC_MULTIPLE_THREADS)

void Heap::registerThread()
{
}

#endif
inline bool isPointerAligned(void* p)
{
    return (((intptr_t)(p) & (sizeof(char*) - 1)) == 0);
}

// Cell size needs to be a power of two for isPossibleCell to be valid.
COMPILE_ASSERT(sizeof(CollectorCell) % 2 == 0, Collector_cell_size_is_power_of_two);

#if USE(JSVALUE32)
static bool isHalfCellAligned(void *p)
{
    return (((intptr_t)(p) & (CELL_MASK >> 1)) == 0);
}

static inline bool isPossibleCell(void* p)
{
    return isHalfCellAligned(p) && p;
}

#else

static inline bool isCellAligned(void *p)
{
    return (((intptr_t)(p) & CELL_MASK) == 0);
}

static inline bool isPossibleCell(void* p)
{
    return isCellAligned(p) && p;
}
#endif // USE(JSVALUE32)
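
// These predicates are the first, cheap filter for conservative scanning: a word pulled off a stack
// is only treated as a candidate JSCell pointer if it is non-null and aligned the way real cells are
// (half-cell alignment is accepted in the JSVALUE32 configuration, full cell alignment otherwise);
// markConservatively() below then checks that the candidate actually falls inside one of the heap's blocks.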
void Heap::markConservatively(MarkStack& markStack, void* start, void* end)
{
    ASSERT((static_cast<char*>(end) - static_cast<char*>(start)) < 0x1000000);
    ASSERT(isPointerAligned(start));
    ASSERT(isPointerAligned(end));

    char** p = static_cast<char**>(start);
    char** e = static_cast<char**>(end);

    CollectorBlock** blocks = m_heap.blocks;
    while (p != e) {
        char* x = *p++;
        if (isPossibleCell(x)) {
            size_t usedBlocks;
            uintptr_t xAsBits = reinterpret_cast<uintptr_t>(x);
            xAsBits &= CELL_ALIGN_MASK;

            uintptr_t offset = xAsBits & BLOCK_OFFSET_MASK;
            const size_t lastCellOffset = sizeof(CollectorCell) * (CELLS_PER_BLOCK - 1);
            if (offset > lastCellOffset)
                continue;

            CollectorBlock* blockAddr = reinterpret_cast<CollectorBlock*>(xAsBits - offset);
            usedBlocks = m_heap.usedBlocks;
            for (size_t block = 0; block < usedBlocks; block++) {
                if (blocks[block] != blockAddr)
                    continue;
                markStack.append(reinterpret_cast<JSCell*>(xAsBits));
            }
        }
    }
}
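
// A candidate pointer is only appended after two checks: its offset within a block must land on a real
// cell (not past the last cell), and the block address recovered by masking off that offset must match
// a block this heap actually owns. Arbitrary stack words that merely look like pointers therefore
// cannot cause marking outside the collector's own blocks.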
void NEVER_INLINE Heap::markCurrentThreadConservativelyInternal(MarkStack& markStack)
{
    void* dummy;
    void* stackPointer = &dummy;
    void* stackBase = currentThreadStackBase();
    markConservatively(markStack, stackPointer, stackBase);
}

#if COMPILER(GCC)
#define REGISTER_BUFFER_ALIGNMENT __attribute__ ((aligned (sizeof(void*))))
#else
#define REGISTER_BUFFER_ALIGNMENT
#endif

void Heap::markCurrentThreadConservatively(MarkStack& markStack)
{
    // setjmp forces volatile registers onto the stack
    jmp_buf registers REGISTER_BUFFER_ALIGNMENT;
#if COMPILER(MSVC)
#pragma warning(push)
#pragma warning(disable: 4611)
#endif
    setjmp(registers);
#if COMPILER(MSVC)
#pragma warning(pop)
#endif

    markCurrentThreadConservativelyInternal(markStack);
}
#if ENABLE(JSC_MULTIPLE_THREADS)

static inline void suspendThread(const PlatformThread& platformThread)
{
#if OS(DARWIN)
    thread_suspend(platformThread);
#elif OS(WINDOWS)
    SuspendThread(platformThread);
#else
#error Need a way to suspend threads on this platform
#endif
}

static inline void resumeThread(const PlatformThread& platformThread)
{
#if OS(DARWIN)
    thread_resume(platformThread);
#elif OS(WINDOWS)
    ResumeThread(platformThread);
#else
#error Need a way to resume threads on this platform
#endif
}

typedef unsigned long usword_t; // word size, assumed to be either 32 or 64 bit

#if OS(DARWIN)

#if CPU(X86)
typedef i386_thread_state_t PlatformThreadRegisters;
#elif CPU(X86_64)
typedef x86_thread_state64_t PlatformThreadRegisters;
#elif CPU(PPC)
typedef ppc_thread_state_t PlatformThreadRegisters;
#elif CPU(PPC64)
typedef ppc_thread_state64_t PlatformThreadRegisters;
#elif CPU(ARM)
typedef arm_thread_state_t PlatformThreadRegisters;
#else
#error Unknown Architecture
#endif

#elif OS(WINDOWS) && CPU(X86)
typedef CONTEXT PlatformThreadRegisters;
#else
#error Need a thread register struct for this platform
#endif
static size_t getPlatformThreadRegisters(const PlatformThread& platformThread, PlatformThreadRegisters& regs)
{
#if OS(DARWIN)

#if CPU(X86)
    unsigned user_count = sizeof(regs)/sizeof(int);
    thread_state_flavor_t flavor = i386_THREAD_STATE;
#elif CPU(X86_64)
    unsigned user_count = x86_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = x86_THREAD_STATE64;
#elif CPU(PPC)
    unsigned user_count = PPC_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE;
#elif CPU(PPC64)
    unsigned user_count = PPC_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE64;
#elif CPU(ARM)
    unsigned user_count = ARM_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = ARM_THREAD_STATE;
#else
#error Unknown Architecture
#endif

    kern_return_t result = thread_get_state(platformThread, flavor, (thread_state_t)&regs, &user_count);
    if (result != KERN_SUCCESS) {
        WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION,
                            "JavaScript garbage collection failed because thread_get_state returned an error (%d). This is probably the result of running inside Rosetta, which is not supported.", result);
        CRASH();
    }
    return user_count * sizeof(usword_t);

#elif OS(WINDOWS) && CPU(X86)
    regs.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL | CONTEXT_SEGMENTS;
    GetThreadContext(platformThread, &regs);
    return sizeof(CONTEXT);
#else
#error Need a way to get thread registers on this platform
#endif
}
static inline void* otherThreadStackPointer(const PlatformThreadRegisters& regs)
{
#if OS(DARWIN)

#if __DARWIN_UNIX03

#if CPU(X86)
    return reinterpret_cast<void*>(regs.__esp);
#elif CPU(X86_64)
    return reinterpret_cast<void*>(regs.__rsp);
#elif CPU(PPC) || CPU(PPC64)
    return reinterpret_cast<void*>(regs.__r1);
#elif CPU(ARM)
    return reinterpret_cast<void*>(regs.__sp);
#else
#error Unknown Architecture
#endif

#else // !__DARWIN_UNIX03

#if CPU(X86)
    return reinterpret_cast<void*>(regs.esp);
#elif CPU(X86_64)
    return reinterpret_cast<void*>(regs.rsp);
#elif CPU(PPC) || CPU(PPC64)
    return reinterpret_cast<void*>(regs.r1);
#else
#error Unknown Architecture
#endif

#endif // __DARWIN_UNIX03

#elif CPU(X86) && OS(WINDOWS)
    return reinterpret_cast<void*>((uintptr_t) regs.Esp);
#else
#error Need a way to get the stack pointer for another thread on this platform
#endif
}
void Heap::markOtherThreadConservatively(MarkStack& markStack, Thread* thread)
{
    suspendThread(thread->platformThread);

    PlatformThreadRegisters regs;
    size_t regSize = getPlatformThreadRegisters(thread->platformThread, regs);

    // mark the thread's registers
    markConservatively(markStack, static_cast<void*>(&regs), static_cast<void*>(reinterpret_cast<char*>(&regs) + regSize));

    void* stackPointer = otherThreadStackPointer(regs);
    markConservatively(markStack, stackPointer, thread->stackBase);

    resumeThread(thread->platformThread);
}
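
// markOtherThreadConservatively() keeps the target thread suspended for the whole scan: its saved
// register file is treated as a small root range, and the span from its current stack pointer to the
// stack base recorded at registration time is scanned conservatively, so references held only in that
// thread's registers or stack frames still keep their cells alive.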
void Heap::markStackObjectsConservatively(MarkStack& markStack)
{
    markCurrentThreadConservatively(markStack);

#if ENABLE(JSC_MULTIPLE_THREADS)

    if (m_currentThreadRegistrar) {

        MutexLocker lock(m_registeredThreadsMutex);

        // Forbid malloc during the mark phase. Marking a thread suspends it, so
        // a malloc inside markChildren() would risk a deadlock with a thread that had been
        // suspended while holding the malloc lock.
        fastMallocForbid();

        // It is safe to access the registeredThreads list, because we earlier asserted that locks are being held,
        // and since this is a shared heap, they are real locks.
        for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
            if (!pthread_equal(thread->posixThread, pthread_self()))
                markOtherThreadConservatively(markStack, thread);
        }

        fastMallocAllow();
    }
#endif
}
void Heap::protect(JSValue k)
{
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance());

    m_protectedValues.add(k.asCell());
}

bool Heap::unprotect(JSValue k)
{
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance());

    return m_protectedValues.remove(k.asCell());
}
void Heap::markProtectedObjects(MarkStack& markStack)
{
    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it) {
        markStack.append(it->first);
    }
}
void Heap::pushTempSortVector(Vector<ValueStringPair>* tempVector)
{
    m_tempSortingVectors.append(tempVector);
}

void Heap::popTempSortVector(Vector<ValueStringPair>* tempVector)
{
    ASSERT_UNUSED(tempVector, tempVector == m_tempSortingVectors.last());
    m_tempSortingVectors.removeLast();
}

void Heap::markTempSortVectors(MarkStack& markStack)
{
    typedef Vector<Vector<ValueStringPair>* > VectorOfValueStringVectors;

    VectorOfValueStringVectors::iterator end = m_tempSortingVectors.end();
    for (VectorOfValueStringVectors::iterator it = m_tempSortingVectors.begin(); it != end; ++it) {
        Vector<ValueStringPair>* tempSortingVector = *it;

        Vector<ValueStringPair>::iterator vectorEnd = tempSortingVector->end();
        for (Vector<ValueStringPair>::iterator vectorIt = tempSortingVector->begin(); vectorIt != vectorEnd; ++vectorIt)
            if (vectorIt->first)
                markStack.append(vectorIt->first);
    }
}
void Heap::clearMarkBits()
{
    for (size_t i = 0; i < m_heap.usedBlocks; ++i)
        clearMarkBits(m_heap.blocks[i]);
}

void Heap::clearMarkBits(CollectorBlock* block)
{
    // allocate assumes that the last cell in every block is marked.
    block->marked.clearAll();
    block->marked.set(HeapConstants::cellsPerBlock - 1);
}
size_t Heap::markedCells(size_t startBlock, size_t startCell) const
{
    ASSERT(startBlock <= m_heap.usedBlocks);
    ASSERT(startCell < HeapConstants::cellsPerBlock);

    if (startBlock >= m_heap.usedBlocks)
        return 0;

    size_t result = 0;
    result += m_heap.blocks[startBlock]->marked.count(startCell);
    for (size_t i = startBlock + 1; i < m_heap.usedBlocks; ++i)
        result += m_heap.blocks[i]->marked.count();

    return result;
}
void Heap::sweep()
{
    ASSERT(m_heap.operationInProgress == NoOperation);
    if (m_heap.operationInProgress != NoOperation)
        CRASH();
    m_heap.operationInProgress = Collection;

#if !ENABLE(JSC_ZOMBIES)
    Structure* dummyMarkableCellStructure = m_globalData->dummyMarkableCellStructure.get();
#endif

    DeadObjectIterator it(m_heap, m_heap.nextBlock, m_heap.nextCell);
    DeadObjectIterator end(m_heap, m_heap.usedBlocks);
    for ( ; it != end; ++it) {
        JSCell* cell = *it;
#if ENABLE(JSC_ZOMBIES)
        if (!cell->isZombie()) {
            const ClassInfo* info = cell->classInfo();
            cell->~JSCell();
            new (cell) JSZombie(info, JSZombie::leakedZombieStructure());
            Heap::markCell(cell);
        }
#else
        cell->~JSCell();
        // Callers of sweep assume it's safe to mark any cell in the heap.
        new (cell) JSCell(dummyMarkableCellStructure);
#endif
    }

    m_heap.operationInProgress = NoOperation;
}
void Heap::markRoots()
{
    if (m_globalData->isSharedInstance()) {
        ASSERT(JSLock::lockCount() > 0);
        ASSERT(JSLock::currentThreadIsHoldingLock());
    }

    ASSERT(m_heap.operationInProgress == NoOperation);
    if (m_heap.operationInProgress != NoOperation)
        CRASH();

    m_heap.operationInProgress = Collection;

    MarkStack& markStack = m_globalData->markStack;

    // Reset mark bits.
    clearMarkBits();

    // Mark stack roots.
    markStackObjectsConservatively(markStack);
    m_globalData->interpreter->registerFile().markCallFrames(markStack, this);

    // Mark explicitly registered roots.
    markProtectedObjects(markStack);

    // Mark temporary vector for Array sorting
    markTempSortVectors(markStack);

    // Mark misc. other roots.
    if (m_markListSet && m_markListSet->size())
        MarkedArgumentBuffer::markLists(markStack, *m_markListSet);
    if (m_globalData->exception)
        markStack.append(m_globalData->exception);
    if (m_globalData->functionCodeBlockBeingReparsed)
        m_globalData->functionCodeBlockBeingReparsed->markAggregate(markStack);
    if (m_globalData->firstStringifierToMark)
        JSONObject::markStringifiers(markStack, m_globalData->firstStringifierToMark);

    // Mark the small strings cache last, since it will clear itself if nothing
    // else has marked it.
    m_globalData->smallStrings.markChildren(markStack);

    markStack.drain();
    markStack.compact();

    m_heap.operationInProgress = NoOperation;
}
size_t Heap::objectCount() const
{
    return m_heap.nextBlock * HeapConstants::cellsPerBlock // allocated full blocks
           + m_heap.nextCell // allocated cells in current block
           + markedCells(m_heap.nextBlock, m_heap.nextCell) // marked cells in remainder of m_heap
           - m_heap.usedBlocks; // 1 cell per block is a dummy sentinel
}
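
// Example (numbers hypothetical): with 3 fully scanned blocks of 100 cells each, 40 cells allocated
// in the current block, 25 marked cells beyond the cursor, and 4 blocks in use, objectCount() reports
// 3 * 100 + 40 + 25 - 4 = 361 cells, the final subtraction removing each block's always-marked
// sentinel cell.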
void Heap::addToStatistics(Heap::Statistics& statistics) const
{
    statistics.size += m_heap.usedBlocks * BLOCK_SIZE;
    statistics.free += m_heap.usedBlocks * BLOCK_SIZE - (objectCount() * HeapConstants::cellSize);
}

Heap::Statistics Heap::statistics() const
{
    Statistics statistics = { 0, 0 };
    addToStatistics(statistics);
    return statistics;
}
size_t Heap::globalObjectCount()
{
    size_t count = 0;
    if (JSGlobalObject* head = m_globalData->head) {
        JSGlobalObject* o = head;
        do {
            ++count;
            o = o->next();
        } while (o != head);
    }
    return count;
}

size_t Heap::protectedGlobalObjectCount()
{
    size_t count = 0;
    if (JSGlobalObject* head = m_globalData->head) {
        JSGlobalObject* o = head;
        do {
            if (m_protectedValues.contains(o))
                ++count;
            o = o->next();
        } while (o != head);
    }
    return count;
}

size_t Heap::protectedObjectCount()
{
    return m_protectedValues.size();
}
static const char* typeName(JSCell* cell)
{
    if (cell->isString())
        return "string";
    if (cell->isNumber())
        return "number";
    if (cell->isGetterSetter())
        return "Getter-Setter";
    if (cell->isAPIValueWrapper())
        return "API wrapper";
    if (cell->isPropertyNameIterator())
        return "For-in iterator";
    if (!cell->isObject())
        return "[empty cell]";
    const ClassInfo* info = cell->classInfo();
    return info ? info->className : "Object";
}
HashCountedSet<const char*>* Heap::protectedObjectTypeCounts()
{
    HashCountedSet<const char*>* counts = new HashCountedSet<const char*>;

    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
        counts->add(typeName(it->first));

    return counts;
}
HashCountedSet<const char*>* Heap::objectTypeCounts()
{
    HashCountedSet<const char*>* counts = new HashCountedSet<const char*>;

    LiveObjectIterator it = primaryHeapBegin();
    LiveObjectIterator heapEnd = primaryHeapEnd();
    for ( ; it != heapEnd; ++it)
        counts->add(typeName(*it));

    return counts;
}

bool Heap::isBusy()
{
    return m_heap.operationInProgress != NoOperation;
}
void Heap::reset()
{
    JAVASCRIPTCORE_GC_BEGIN();

    markRoots();

    JAVASCRIPTCORE_GC_MARKED();

    m_heap.nextCell = 0;
    m_heap.nextBlock = 0;
    m_heap.nextNumber = 0;
    m_heap.extraCost = 0;
#if ENABLE(JSC_ZOMBIES)
    sweep();
#endif
    resizeBlocks();

    JAVASCRIPTCORE_GC_END();
}
void Heap::collectAllGarbage()
{
    JAVASCRIPTCORE_GC_BEGIN();

    // If the last iteration through the heap deallocated blocks, we need
    // to clean up remaining garbage before marking. Otherwise, the conservative
    // marking mechanism might follow a pointer to unmapped memory.
    if (m_heap.didShrink)
        sweep();

    markRoots();

    JAVASCRIPTCORE_GC_MARKED();

    m_heap.nextCell = 0;
    m_heap.nextBlock = 0;
    m_heap.nextNumber = 0;
    m_heap.extraCost = 0;
    sweep();
    resizeBlocks();

    JAVASCRIPTCORE_GC_END();
}
LiveObjectIterator Heap::primaryHeapBegin()
{
    return LiveObjectIterator(m_heap, 0);
}

LiveObjectIterator Heap::primaryHeapEnd()
{
    return LiveObjectIterator(m_heap, m_heap.usedBlocks);
}