/*
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "Collector.h"

#include "CallFrame.h"
#include "CollectorHeapIterator.h"
#include "Interpreter.h"
#include "JSGlobalObject.h"
#include "JSONObject.h"
#include <wtf/FastMalloc.h>
#include <wtf/HashCountedSet.h>
#include <wtf/UnusedParam.h>
#include <wtf/VMTags.h>

#if PLATFORM(DARWIN)
#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <mach/vm_map.h>
#elif PLATFORM(WIN_OS)
#include <windows.h>
#endif

#if HAVE(PTHREAD_NP_H)
#include <pthread_np.h>
#endif
#define DEBUG_COLLECTOR 0
#define COLLECT_ON_EVERY_ALLOCATION 0

using std::max;

namespace JSC {

const size_t SPARE_EMPTY_BLOCKS = 2;
const size_t GROWTH_FACTOR = 2;
const size_t LOW_WATER_FACTOR = 4;
const size_t ALLOCATIONS_PER_COLLECTION = 4000;
// This value has to be a macro to be used in max() without introducing
// a PIC branch in Mach-O binaries, see <rdar://problem/5971391>.
#define MIN_ARRAY_SIZE (static_cast<size_t>(14))
static void freeHeap(CollectorHeap*);
#if ENABLE(JSC_MULTIPLE_THREADS)

#if PLATFORM(DARWIN)
typedef mach_port_t PlatformThread;
#elif PLATFORM(WIN_OS)
struct PlatformThread {
    PlatformThread(DWORD _id, HANDLE _handle) : id(_id), handle(_handle) {}
    DWORD id;
    HANDLE handle;
};
#endif
class Heap::Thread {
public:
    Thread(pthread_t pthread, const PlatformThread& platThread, void* base)
        : posixThread(pthread)
        , platformThread(platThread)
        , stackBase(base)
    {
    }

    Thread* next;
    pthread_t posixThread;
    PlatformThread platformThread;
    void* stackBase;
};

#endif
Heap::Heap(JSGlobalData* globalData)
    : m_markListSet(0)
#if ENABLE(JSC_MULTIPLE_THREADS)
    , m_registeredThreads(0)
    , m_currentThreadRegistrar(0)
#endif
    , m_globalData(globalData)
{
    ASSERT(globalData);
    memset(&primaryHeap, 0, sizeof(CollectorHeap));
    memset(&numberHeap, 0, sizeof(CollectorHeap));
}
Heap::~Heap()
{
    // The destroy function must already have been called, so assert this.
    ASSERT(!m_globalData);
}

void Heap::destroy()
{
    if (!m_globalData)
        return;

    // The global object is not GC protected at this point, so sweeping may delete it
    // (and thus the global data) before other objects that may use the global data.
    RefPtr<JSGlobalData> protect(m_globalData);

    delete m_markListSet;
    m_markListSet = 0;

    sweep<PrimaryHeap>();
    // No need to sweep number heap, because the JSNumber destructor doesn't do anything.

    ASSERT(!primaryHeap.numLiveObjects);

    freeHeap(&primaryHeap);
    freeHeap(&numberHeap);

#if ENABLE(JSC_MULTIPLE_THREADS)
    if (m_currentThreadRegistrar) {
        int error = pthread_key_delete(m_currentThreadRegistrar);
        ASSERT_UNUSED(error, !error);
    }

    MutexLocker registeredThreadsLock(m_registeredThreadsMutex);
    for (Heap::Thread* t = m_registeredThreads; t;) {
        Heap::Thread* next = t->next;
        delete t;
        t = next;
    }
#endif

    m_globalData = 0;
}
template <HeapType heapType>
static NEVER_INLINE CollectorBlock* allocateBlock()
{
#if PLATFORM(DARWIN)
    vm_address_t address = 0;
    // FIXME: tag the region as a JavaScriptCore heap when we get a registered VM tag: <rdar://problem/6054788>.
    vm_map(current_task(), &address, BLOCK_SIZE, BLOCK_OFFSET_MASK, VM_FLAGS_ANYWHERE | VM_TAG_FOR_COLLECTOR_MEMORY, MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
#elif PLATFORM(SYMBIAN)
    // no memory map in symbian, need to hack with fastMalloc
    void* address = fastMalloc(BLOCK_SIZE);
    memset(reinterpret_cast<void*>(address), 0, BLOCK_SIZE);
#elif PLATFORM(WIN_OS)
    // windows virtual address granularity is naturally 64k
    LPVOID address = VirtualAlloc(NULL, BLOCK_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#elif HAVE(POSIX_MEMALIGN)
    void* address;
    posix_memalign(&address, BLOCK_SIZE, BLOCK_SIZE);
    memset(address, 0, BLOCK_SIZE);
#else

#if ENABLE(JSC_MULTIPLE_THREADS)
#error Need to initialize pagesize safely.
#endif
    static size_t pagesize = getpagesize();

    size_t extra = 0;
    if (BLOCK_SIZE > pagesize)
        extra = BLOCK_SIZE - pagesize;

    void* mmapResult = mmap(NULL, BLOCK_SIZE + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
    uintptr_t address = reinterpret_cast<uintptr_t>(mmapResult);

    size_t adjust = 0;
    if ((address & BLOCK_OFFSET_MASK) != 0)
        adjust = BLOCK_SIZE - (address & BLOCK_OFFSET_MASK);

    if (adjust > 0)
        munmap(reinterpret_cast<char*>(address), adjust);

    if (extra > adjust)
        munmap(reinterpret_cast<char*>(address + adjust + BLOCK_SIZE), extra - adjust);

    address += adjust;
    memset(reinterpret_cast<void*>(address), 0, BLOCK_SIZE);
#endif

    reinterpret_cast<CollectorBlock*>(address)->type = heapType;
    return reinterpret_cast<CollectorBlock*>(address);
}
static void freeBlock(CollectorBlock* block)
{
#if PLATFORM(DARWIN)
    vm_deallocate(current_task(), reinterpret_cast<vm_address_t>(block), BLOCK_SIZE);
#elif PLATFORM(SYMBIAN)
    fastFree(block);
#elif PLATFORM(WIN_OS)
    VirtualFree(block, 0, MEM_RELEASE);
#elif HAVE(POSIX_MEMALIGN)
    free(block);
#else
    munmap(reinterpret_cast<char*>(block), BLOCK_SIZE);
#endif
}
static void freeHeap(CollectorHeap* heap)
{
    for (size_t i = 0; i < heap->usedBlocks; ++i)
        if (heap->blocks[i])
            freeBlock(heap->blocks[i]);
    fastFree(heap->blocks);
    memset(heap, 0, sizeof(CollectorHeap));
}
void Heap::recordExtraCost(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.
    // NOTE: we target the primaryHeap unconditionally as JSNumber doesn't modify cost

    primaryHeap.extraCost += cost;
}
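// Illustrative note (not part of the original source): extraCost feeds the
// collection trigger in heapAllocate(). For example, with
// ALLOCATIONS_PER_COLLECTION = 4000, one call to recordExtraCost(5000) makes
// heap.extraCost exceed the threshold, so the next primary-heap allocation
// computes newCost = numNewObjects + extraCost >= 4000 and attempts a
// collection (provided newCost also reaches numLiveObjectsAtLastCollect),
// even though few cells were actually handed out since the last GC.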
template <HeapType heapType> ALWAYS_INLINE void* Heap::heapAllocate(size_t s)
{
    typedef typename HeapConstants<heapType>::Block Block;
    typedef typename HeapConstants<heapType>::Cell Cell;

    CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;

    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());
    ASSERT_UNUSED(s, s <= HeapConstants<heapType>::cellSize);

    ASSERT(heap.operationInProgress == NoOperation);
    ASSERT(heapType == PrimaryHeap || heap.extraCost == 0);
    // FIXME: If another global variable access here doesn't hurt performance
    // too much, we could CRASH() in NDEBUG builds, which could help ensure we
    // don't spend any time debugging cases where we allocate inside an object's
    // deallocation code.

#if COLLECT_ON_EVERY_ALLOCATION
    collect();
#endif

    size_t numLiveObjects = heap.numLiveObjects;
    size_t usedBlocks = heap.usedBlocks;
    size_t i = heap.firstBlockWithPossibleSpace;

    // if we have a huge amount of extra cost, we'll try to collect even if we still have
    // free cells left
    if (heapType == PrimaryHeap && heap.extraCost > ALLOCATIONS_PER_COLLECTION) {
        size_t numLiveObjectsAtLastCollect = heap.numLiveObjectsAtLastCollect;
        size_t numNewObjects = numLiveObjects - numLiveObjectsAtLastCollect;
        const size_t newCost = numNewObjects + heap.extraCost;
        if (newCost >= ALLOCATIONS_PER_COLLECTION && newCost >= numLiveObjectsAtLastCollect)
            goto collect;
    }

    ASSERT(heap.operationInProgress == NoOperation);
#ifndef NDEBUG
    // FIXME: Consider doing this in NDEBUG builds too (see comment above).
    heap.operationInProgress = Allocation;
#endif

scan:
    Block* targetBlock;
    size_t targetBlockUsedCells;
    if (i != usedBlocks) {
        targetBlock = reinterpret_cast<Block*>(heap.blocks[i]);
        targetBlockUsedCells = targetBlock->usedCells;
        ASSERT(targetBlockUsedCells <= HeapConstants<heapType>::cellsPerBlock);
        while (targetBlockUsedCells == HeapConstants<heapType>::cellsPerBlock) {
            if (++i == usedBlocks)
                goto collect;
            targetBlock = reinterpret_cast<Block*>(heap.blocks[i]);
            targetBlockUsedCells = targetBlock->usedCells;
            ASSERT(targetBlockUsedCells <= HeapConstants<heapType>::cellsPerBlock);
        }
        heap.firstBlockWithPossibleSpace = i;
    } else {

collect:
        size_t numLiveObjectsAtLastCollect = heap.numLiveObjectsAtLastCollect;
        size_t numNewObjects = numLiveObjects - numLiveObjectsAtLastCollect;
        const size_t newCost = numNewObjects + heap.extraCost;

        if (newCost >= ALLOCATIONS_PER_COLLECTION && newCost >= numLiveObjectsAtLastCollect) {
#ifndef NDEBUG
            heap.operationInProgress = NoOperation;
#endif
            bool collected = collect();
#ifndef NDEBUG
            heap.operationInProgress = Allocation;
#endif
            if (collected) {
                numLiveObjects = heap.numLiveObjects;
                usedBlocks = heap.usedBlocks;
                i = heap.firstBlockWithPossibleSpace;
                goto scan;
            }
        }

        // didn't find a block, and GC didn't reclaim anything, need to allocate a new block
        size_t numBlocks = heap.numBlocks;
        if (usedBlocks == numBlocks) {
            static const size_t maxNumBlocks = ULONG_MAX / sizeof(CollectorBlock*) / GROWTH_FACTOR;
            if (numBlocks > maxNumBlocks)
                CRASH();
            numBlocks = max(MIN_ARRAY_SIZE, numBlocks * GROWTH_FACTOR);
            heap.numBlocks = numBlocks;
            heap.blocks = static_cast<CollectorBlock**>(fastRealloc(heap.blocks, numBlocks * sizeof(CollectorBlock*)));
        }

        targetBlock = reinterpret_cast<Block*>(allocateBlock<heapType>());
        targetBlock->freeList = targetBlock->cells;
        targetBlock->heap = this;
        targetBlockUsedCells = 0;
        heap.blocks[usedBlocks] = reinterpret_cast<CollectorBlock*>(targetBlock);
        heap.usedBlocks = usedBlocks + 1;
        heap.firstBlockWithPossibleSpace = usedBlocks;
    }

    // find a free spot in the block and detach it from the free list
    Cell* newCell = targetBlock->freeList;

    // "next" field is a cell offset -- 0 means next cell, so a zeroed block is already initialized
    targetBlock->freeList = (newCell + 1) + newCell->u.freeCell.next;

    targetBlock->usedCells = static_cast<uint32_t>(targetBlockUsedCells + 1);
    heap.numLiveObjects = numLiveObjects + 1;

#ifndef NDEBUG
    // FIXME: Consider doing this in NDEBUG builds too (see comment above).
    heap.operationInProgress = NoOperation;
#endif

    return newCell;
}
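// Illustrative note (not part of the original source): the free-list "next"
// field is an offset relative to the cell *after* the current one, which is
// why the pop above reads freeList = (newCell + 1) + newCell->u.freeCell.next.
// In a freshly zeroed block every next is 0, so the list implicitly threads
// through consecutive cells (popping cells[0] yields cells[1], and so on) with
// no initialization pass. sweep() preserves the same encoding when it pushes a
// dead cell: next = freeList - (cell + 1) makes (cell + 1) + next point back
// at the old list head.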
void* Heap::allocate(size_t s)
{
    return heapAllocate<PrimaryHeap>(s);
}

void* Heap::allocateNumber(size_t s)
{
    return heapAllocate<NumberHeap>(s);
}
#if PLATFORM(WINCE)
void* g_stackBase = 0;

inline bool isPageWritable(void* page)
{
    MEMORY_BASIC_INFORMATION memoryInformation;
    DWORD result = VirtualQuery(page, &memoryInformation, sizeof(memoryInformation));

    // return false on error, including ptr outside memory
    if (result != sizeof(memoryInformation))
        return false;

    DWORD protect = memoryInformation.Protect & ~(PAGE_GUARD | PAGE_NOCACHE);
    return protect == PAGE_READWRITE
        || protect == PAGE_WRITECOPY
        || protect == PAGE_EXECUTE_READWRITE
        || protect == PAGE_EXECUTE_WRITECOPY;
}

static void* getStackBase(void* previousFrame)
{
    // find the address of this stack frame by taking the address of a local variable
    bool isGrowingDownward;
    void* thisFrame = (void*)(&isGrowingDownward);

    isGrowingDownward = previousFrame < &thisFrame;
    static DWORD pageSize = 0;
    if (!pageSize) {
        SYSTEM_INFO systemInfo;
        GetSystemInfo(&systemInfo);
        pageSize = systemInfo.dwPageSize;
    }

    // scan all of memory starting from this frame, and return the last writeable page found
    register char* currentPage = (char*)((DWORD)thisFrame & ~(pageSize - 1));
    if (isGrowingDownward) {
        while (currentPage > 0) {
            // check for underflow
            if (currentPage >= (char*)pageSize)
                currentPage -= pageSize;
            else
                currentPage = 0;
            if (!isPageWritable(currentPage))
                return currentPage + pageSize;
        }
        return 0;
    } else {
        while (true) {
            // guaranteed to complete because isPageWritable returns false at end of memory
            currentPage += pageSize;
            if (!isPageWritable(currentPage))
                return currentPage;
        }
    }
}
#endif
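// Illustrative note (not part of the original source): on this WinCE path,
// getStackBase() infers the direction of stack growth by comparing the address
// of a local in the caller's frame (previousFrame) against the address of a
// local in its own frame, then walks page by page in that direction until
// isPageWritable() (backed by VirtualQuery) reports a non-writable page; that
// boundary is taken to be the base of the stack.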
static inline void* currentThreadStackBase()
{
#if PLATFORM(DARWIN)
    pthread_t thread = pthread_self();
    return pthread_get_stackaddr_np(thread);
#elif PLATFORM(WIN_OS) && PLATFORM(X86) && COMPILER(MSVC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    NT_TIB* pTib;
    __asm {
        MOV EAX, FS:[18h]
        MOV pTib, EAX
    }
    return static_cast<void*>(pTib->StackBase);
#elif PLATFORM(WIN_OS) && PLATFORM(X86_64) && COMPILER(MSVC)
    PNT_TIB64 pTib = reinterpret_cast<PNT_TIB64>(NtCurrentTeb());
    return reinterpret_cast<void*>(pTib->StackBase);
#elif PLATFORM(WIN_OS) && PLATFORM(X86) && COMPILER(GCC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    NT_TIB* pTib;
    asm ( "movl %%fs:0x18, %0\n"
          : "=r" (pTib)
        );
    return static_cast<void*>(pTib->StackBase);
#elif PLATFORM(SOLARIS)
    stack_t s;
    thr_stksegment(&s);
    return s.ss_sp;
#elif PLATFORM(OPENBSD)
    pthread_t thread = pthread_self();
    stack_t stack;
    pthread_stackseg_np(thread, &stack);
    return stack.ss_sp;
#elif PLATFORM(SYMBIAN)
    static void* stackBase = 0;
    if (stackBase == 0) {
        TThreadStackInfo info;
        RThread thread;
        thread.StackInfo(info);
        stackBase = (void*)info.iBase;
    }
    return (void*)stackBase;
#elif PLATFORM(UNIX)
    static void* stackBase = 0;
    static size_t stackSize = 0;
    static pthread_t stackThread;
    pthread_t thread = pthread_self();
    if (stackBase == 0 || thread != stackThread) {
        pthread_attr_t sattr;
        pthread_attr_init(&sattr);
#if HAVE(PTHREAD_NP_H) || PLATFORM(NETBSD)
        // e.g. on FreeBSD 5.4, neundorf@kde.org
        pthread_attr_get_np(thread, &sattr);
#else
        // FIXME: this function is non-portable; other POSIX systems may have different np alternatives
        pthread_getattr_np(thread, &sattr);
#endif
        int rc = pthread_attr_getstack(&sattr, &stackBase, &stackSize);
        (void)rc; // FIXME: Deal with error code somehow? Seems fatal.
        ASSERT(stackBase);
        pthread_attr_destroy(&sattr);
        stackThread = thread;
    }
    return static_cast<char*>(stackBase) + stackSize;
#elif PLATFORM(WINCE)
    if (g_stackBase)
        return g_stackBase;
    else {
        int dummy;
        return getStackBase(&dummy);
    }
#else
#error Need a way to get the stack base on this platform
#endif
}
#if ENABLE(JSC_MULTIPLE_THREADS)

static inline PlatformThread getCurrentPlatformThread()
{
#if PLATFORM(DARWIN)
    return pthread_mach_thread_np(pthread_self());
#elif PLATFORM(WIN_OS)
    HANDLE threadHandle = pthread_getw32threadhandle_np(pthread_self());
    return PlatformThread(GetCurrentThreadId(), threadHandle);
#endif
}
void Heap::makeUsableFromMultipleThreads()
{
    if (m_currentThreadRegistrar)
        return;

    int error = pthread_key_create(&m_currentThreadRegistrar, unregisterThread);
    if (error)
        CRASH();
}
void Heap::registerThread()
{
    if (!m_currentThreadRegistrar || pthread_getspecific(m_currentThreadRegistrar))
        return;

    pthread_setspecific(m_currentThreadRegistrar, this);
    Heap::Thread* thread = new Heap::Thread(pthread_self(), getCurrentPlatformThread(), currentThreadStackBase());

    MutexLocker lock(m_registeredThreadsMutex);

    thread->next = m_registeredThreads;
    m_registeredThreads = thread;
}
void Heap::unregisterThread(void* p)
{
    if (p)
        static_cast<Heap*>(p)->unregisterThread();
}
void Heap::unregisterThread()
{
    pthread_t currentPosixThread = pthread_self();

    MutexLocker lock(m_registeredThreadsMutex);

    if (pthread_equal(currentPosixThread, m_registeredThreads->posixThread)) {
        Thread* t = m_registeredThreads;
        m_registeredThreads = m_registeredThreads->next;
        delete t;
    } else {
        Heap::Thread* last = m_registeredThreads;
        Heap::Thread* t;
        for (t = m_registeredThreads->next; t; t = t->next) {
            if (pthread_equal(t->posixThread, currentPosixThread)) {
                last->next = t->next;
                break;
            }
            last = t;
        }
        ASSERT(t); // If t is NULL, we never found ourselves in the list.
        delete t;
    }
}
#else // ENABLE(JSC_MULTIPLE_THREADS)

void Heap::registerThread()
{
}

#endif

#define IS_POINTER_ALIGNED(p) (((intptr_t)(p) & (sizeof(char*) - 1)) == 0)

// cell size needs to be a power of two for this to be valid
#define IS_HALF_CELL_ALIGNED(p) (((intptr_t)(p) & (CELL_MASK >> 1)) == 0)
void Heap::markConservatively(void* start, void* end)
{
    if (start > end) {
        void* tmp = start;
        start = end;
        end = tmp;
    }

    ASSERT((static_cast<char*>(end) - static_cast<char*>(start)) < 0x1000000);
    ASSERT(IS_POINTER_ALIGNED(start));
    ASSERT(IS_POINTER_ALIGNED(end));

    char** p = static_cast<char**>(start);
    char** e = static_cast<char**>(end);

    size_t usedPrimaryBlocks = primaryHeap.usedBlocks;
    size_t usedNumberBlocks = numberHeap.usedBlocks;
    CollectorBlock** primaryBlocks = primaryHeap.blocks;
    CollectorBlock** numberBlocks = numberHeap.blocks;

    const size_t lastCellOffset = sizeof(CollectorCell) * (CELLS_PER_BLOCK - 1);

    while (p != e) {
        char* x = *p++;
        if (IS_HALF_CELL_ALIGNED(x) && x) {
            uintptr_t xAsBits = reinterpret_cast<uintptr_t>(x);
            xAsBits &= CELL_ALIGN_MASK;
            uintptr_t offset = xAsBits & BLOCK_OFFSET_MASK;
            CollectorBlock* blockAddr = reinterpret_cast<CollectorBlock*>(xAsBits - offset);
            // Mark the number heap; we can mark these cells directly to avoid the virtual call cost
            for (size_t block = 0; block < usedNumberBlocks; block++) {
                if ((numberBlocks[block] == blockAddr) & (offset <= lastCellOffset)) {
                    Heap::markCell(reinterpret_cast<JSCell*>(xAsBits));
                    goto endMarkLoop;
                }
            }

            // Mark the primary heap
            for (size_t block = 0; block < usedPrimaryBlocks; block++) {
                if ((primaryBlocks[block] == blockAddr) & (offset <= lastCellOffset)) {
                    if (reinterpret_cast<CollectorCell*>(xAsBits)->u.freeCell.zeroIfFree != 0) {
                        JSCell* imp = reinterpret_cast<JSCell*>(xAsBits);
                        if (!imp->marked())
                            imp->mark();
                    }
                    break;
                }
            }
        endMarkLoop:
            ;
        }
    }
}
void NEVER_INLINE Heap::markCurrentThreadConservativelyInternal()
{
    void* dummy;
    void* stackPointer = &dummy;
    void* stackBase = currentThreadStackBase();
    markConservatively(stackPointer, stackBase);
}
void Heap::markCurrentThreadConservatively()
{
    // setjmp forces volatile registers onto the stack
    jmp_buf registers;
#if COMPILER(MSVC)
#pragma warning(push)
#pragma warning(disable: 4611)
#endif
    setjmp(registers);
#if COMPILER(MSVC)
#pragma warning(pop)
#endif

    markCurrentThreadConservativelyInternal();
}
#if ENABLE(JSC_MULTIPLE_THREADS)

static inline void suspendThread(const PlatformThread& platformThread)
{
#if PLATFORM(DARWIN)
    thread_suspend(platformThread);
#elif PLATFORM(WIN_OS)
    SuspendThread(platformThread.handle);
#else
#error Need a way to suspend threads on this platform
#endif
}

static inline void resumeThread(const PlatformThread& platformThread)
{
#if PLATFORM(DARWIN)
    thread_resume(platformThread);
#elif PLATFORM(WIN_OS)
    ResumeThread(platformThread.handle);
#else
#error Need a way to resume threads on this platform
#endif
}
typedef unsigned long usword_t; // word size, assumed to be either 32 or 64 bit

#if PLATFORM(DARWIN)

#if PLATFORM(X86)
typedef i386_thread_state_t PlatformThreadRegisters;
#elif PLATFORM(X86_64)
typedef x86_thread_state64_t PlatformThreadRegisters;
#elif PLATFORM(PPC)
typedef ppc_thread_state_t PlatformThreadRegisters;
#elif PLATFORM(PPC64)
typedef ppc_thread_state64_t PlatformThreadRegisters;
#elif PLATFORM(ARM)
typedef arm_thread_state_t PlatformThreadRegisters;
#else
#error Unknown Architecture
#endif

#elif PLATFORM(WIN_OS) && PLATFORM(X86)
typedef CONTEXT PlatformThreadRegisters;
#else
#error Need a thread register struct for this platform
#endif
static size_t getPlatformThreadRegisters(const PlatformThread& platformThread, PlatformThreadRegisters& regs)
{
#if PLATFORM(DARWIN)

#if PLATFORM(X86)
    unsigned user_count = sizeof(regs) / sizeof(int);
    thread_state_flavor_t flavor = i386_THREAD_STATE;
#elif PLATFORM(X86_64)
    unsigned user_count = x86_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = x86_THREAD_STATE64;
#elif PLATFORM(PPC)
    unsigned user_count = PPC_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE;
#elif PLATFORM(PPC64)
    unsigned user_count = PPC_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE64;
#elif PLATFORM(ARM)
    unsigned user_count = ARM_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = ARM_THREAD_STATE;
#else
#error Unknown Architecture
#endif

    kern_return_t result = thread_get_state(platformThread, flavor, (thread_state_t)&regs, &user_count);
    if (result != KERN_SUCCESS) {
        WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION,
                            "JavaScript garbage collection failed because thread_get_state returned an error (%d). This is probably the result of running inside Rosetta, which is not supported.", result);
        CRASH();
    }
    return user_count * sizeof(usword_t);
// end PLATFORM(DARWIN)

#elif PLATFORM(WIN_OS) && PLATFORM(X86)
    regs.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL | CONTEXT_SEGMENTS;
    GetThreadContext(platformThread.handle, &regs);
    return sizeof(CONTEXT);
#else
#error Need a way to get thread registers on this platform
#endif
}
static inline void* otherThreadStackPointer(const PlatformThreadRegisters& regs)
{
#if PLATFORM(DARWIN)

#if __DARWIN_UNIX03

#if PLATFORM(X86)
    return reinterpret_cast<void*>(regs.__esp);
#elif PLATFORM(X86_64)
    return reinterpret_cast<void*>(regs.__rsp);
#elif PLATFORM(PPC) || PLATFORM(PPC64)
    return reinterpret_cast<void*>(regs.__r1);
#elif PLATFORM(ARM)
    return reinterpret_cast<void*>(regs.__sp);
#else
#error Unknown Architecture
#endif

#else // !__DARWIN_UNIX03

#if PLATFORM(X86)
    return reinterpret_cast<void*>(regs.esp);
#elif PLATFORM(X86_64)
    return reinterpret_cast<void*>(regs.rsp);
#elif PLATFORM(PPC) || PLATFORM(PPC64)
    return reinterpret_cast<void*>(regs.r1);
#else
#error Unknown Architecture
#endif

#endif // __DARWIN_UNIX03

// end PLATFORM(DARWIN)
#elif PLATFORM(X86) && PLATFORM(WIN_OS)
    return reinterpret_cast<void*>((uintptr_t) regs.Esp);
#else
#error Need a way to get the stack pointer for another thread on this platform
#endif
}
void Heap::markOtherThreadConservatively(Thread* thread)
{
    suspendThread(thread->platformThread);

    PlatformThreadRegisters regs;
    size_t regSize = getPlatformThreadRegisters(thread->platformThread, regs);

    // mark the thread's registers
    markConservatively(static_cast<void*>(&regs), static_cast<void*>(reinterpret_cast<char*>(&regs) + regSize));

    void* stackPointer = otherThreadStackPointer(regs);
    markConservatively(stackPointer, thread->stackBase);

    resumeThread(thread->platformThread);
}

#endif // ENABLE(JSC_MULTIPLE_THREADS)
void Heap::markStackObjectsConservatively()
{
    markCurrentThreadConservatively();

#if ENABLE(JSC_MULTIPLE_THREADS)

    if (m_currentThreadRegistrar) {

        MutexLocker lock(m_registeredThreadsMutex);

#ifndef NDEBUG
        // Forbid malloc during the mark phase. Marking a thread suspends it, so
        // a malloc inside mark() would risk a deadlock with a thread that had been
        // suspended while holding the malloc lock.
        fastMallocForbid();
#endif
        // It is safe to access the registeredThreads list, because we earlier asserted that locks are being held,
        // and since this is a shared heap, they are real locks.
        for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
            if (!pthread_equal(thread->posixThread, pthread_self()))
                markOtherThreadConservatively(thread);
        }
#ifndef NDEBUG
        fastMallocAllow();
#endif
    }
#endif
}
void Heap::setGCProtectNeedsLocking()
{
    // Most clients do not need to call this, with the notable exception of WebCore.
    // Clients that use shared heap have JSLock protection, while others are supposed
    // to do explicit locking. WebCore violates this contract in Database code,
    // which calls gcUnprotect from a secondary thread.
    if (!m_protectedValuesMutex)
        m_protectedValuesMutex.set(new Mutex);
}
void Heap::protect(JSValue k)
{
    ASSERT(k);
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance);

    if (!k.isCell())
        return;

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    m_protectedValues.add(k.asCell());

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();
}
void Heap::unprotect(JSValue k)
{
    ASSERT(k);
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance);

    if (!k.isCell())
        return;

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    m_protectedValues.remove(k.asCell());

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();
}
Heap* Heap::heap(JSValue v)
{
    if (!v.isCell())
        return 0;
    return Heap::cellBlock(v.asCell())->heap;
}
void Heap::markProtectedObjects()
{
    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it) {
        JSCell* val = it->first;
        if (!val->marked())
            val->mark();
    }

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();
}
template <HeapType heapType> size_t Heap::sweep()
{
    typedef typename HeapConstants<heapType>::Block Block;
    typedef typename HeapConstants<heapType>::Cell Cell;

    // SWEEP: delete everything with a zero refcount (garbage) and unmark everything else
    CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;

    size_t emptyBlocks = 0;
    size_t numLiveObjects = heap.numLiveObjects;

    for (size_t block = 0; block < heap.usedBlocks; block++) {
        Block* curBlock = reinterpret_cast<Block*>(heap.blocks[block]);

        size_t usedCells = curBlock->usedCells;
        Cell* freeList = curBlock->freeList;

        if (usedCells == HeapConstants<heapType>::cellsPerBlock) {
            // special case with a block where all cells are used -- testing indicates this happens often
            for (size_t i = 0; i < HeapConstants<heapType>::cellsPerBlock; i++) {
                if (!curBlock->marked.get(i >> HeapConstants<heapType>::bitmapShift)) {
                    Cell* cell = curBlock->cells + i;

                    if (heapType != NumberHeap) {
                        JSCell* imp = reinterpret_cast<JSCell*>(cell);
                        // special case for allocated but uninitialized object
                        // (We don't need this check earlier because nothing prior to this point
                        // assumes the object has a valid vptr.)
                        if (cell->u.freeCell.zeroIfFree == 0)
                            continue;

                        imp->~JSCell();
                    }

                    --usedCells;
                    --numLiveObjects;

                    // put cell on the free list
                    cell->u.freeCell.zeroIfFree = 0;
                    cell->u.freeCell.next = freeList - (cell + 1);
                    freeList = cell;
                }
            }
        } else {
            size_t minimumCellsToProcess = usedCells;
            for (size_t i = 0; (i < minimumCellsToProcess) & (i < HeapConstants<heapType>::cellsPerBlock); i++) {
                Cell* cell = curBlock->cells + i;
                if (cell->u.freeCell.zeroIfFree == 0) {
                    ++minimumCellsToProcess;
                } else {
                    if (!curBlock->marked.get(i >> HeapConstants<heapType>::bitmapShift)) {
                        if (heapType != NumberHeap) {
                            JSCell* imp = reinterpret_cast<JSCell*>(cell);
                            imp->~JSCell();
                        }
                        --usedCells;
                        --numLiveObjects;

                        // put cell on the free list
                        cell->u.freeCell.zeroIfFree = 0;
                        cell->u.freeCell.next = freeList - (cell + 1);
                        freeList = cell;
                    }
                }
            }
        }

        curBlock->usedCells = static_cast<uint32_t>(usedCells);
        curBlock->freeList = freeList;
        curBlock->marked.clearAll();

        if (usedCells == 0) {
            emptyBlocks++;
            if (emptyBlocks > SPARE_EMPTY_BLOCKS) {
#if !DEBUG_COLLECTOR
                freeBlock(reinterpret_cast<CollectorBlock*>(curBlock));
#endif
                // swap with the last block so we compact as we go
                heap.blocks[block] = heap.blocks[heap.usedBlocks - 1];
                heap.usedBlocks--;
                block--; // Don't move forward a step in this case

                if (heap.numBlocks > MIN_ARRAY_SIZE && heap.usedBlocks < heap.numBlocks / LOW_WATER_FACTOR) {
                    heap.numBlocks = heap.numBlocks / GROWTH_FACTOR;
                    heap.blocks = static_cast<CollectorBlock**>(fastRealloc(heap.blocks, heap.numBlocks * sizeof(CollectorBlock*)));
                }
            }
        }
    }

    if (heap.numLiveObjects != numLiveObjects)
        heap.firstBlockWithPossibleSpace = 0;

    heap.numLiveObjects = numLiveObjects;
    heap.numLiveObjectsAtLastCollect = numLiveObjects;
    heap.extraCost = 0;
    return numLiveObjects;
}
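// Illustrative note (not part of the original source): when sweep() releases an
// empty block it compacts heap.blocks in O(1) by moving the last entry into the
// vacated slot and re-examining the same index (the "block--" above) instead of
// shifting the whole array; block order is irrelevant to the allocator, so the
// swap is safe.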
bool Heap::collect()
{
#ifndef NDEBUG
    if (m_globalData->isSharedInstance) {
        ASSERT(JSLock::lockCount() > 0);
        ASSERT(JSLock::currentThreadIsHoldingLock());
    }
#endif

    ASSERT((primaryHeap.operationInProgress == NoOperation) | (numberHeap.operationInProgress == NoOperation));
    if ((primaryHeap.operationInProgress != NoOperation) | (numberHeap.operationInProgress != NoOperation))
        CRASH();

    JAVASCRIPTCORE_GC_BEGIN();
    primaryHeap.operationInProgress = Collection;
    numberHeap.operationInProgress = Collection;

    // MARK: first mark all referenced objects recursively starting out from the set of root objects

    markStackObjectsConservatively();
    markProtectedObjects();
    if (m_markListSet && m_markListSet->size())
        MarkedArgumentBuffer::markLists(*m_markListSet);
    if (m_globalData->exception && !m_globalData->exception.marked())
        m_globalData->exception.mark();
    m_globalData->interpreter->registerFile().markCallFrames(this);
    m_globalData->smallStrings.mark();
    if (m_globalData->scopeNodeBeingReparsed)
        m_globalData->scopeNodeBeingReparsed->mark();
    if (m_globalData->firstStringifierToMark)
        JSONObject::markStringifiers(m_globalData->firstStringifierToMark);

    JAVASCRIPTCORE_GC_MARKED();

    size_t originalLiveObjects = primaryHeap.numLiveObjects + numberHeap.numLiveObjects;
    size_t numLiveObjects = sweep<PrimaryHeap>();
    numLiveObjects += sweep<NumberHeap>();

    primaryHeap.operationInProgress = NoOperation;
    numberHeap.operationInProgress = NoOperation;
    JAVASCRIPTCORE_GC_END(originalLiveObjects, numLiveObjects);

    return numLiveObjects < originalLiveObjects;
}
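// Illustrative note (not part of the original source): the return value of
// collect() reports whether the live count actually dropped. heapAllocate()
// depends on this: after a failed scan it collects, rescans only if collect()
// returned true, and otherwise grows the heap with a fresh block.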
size_t Heap::objectCount()
{
    return primaryHeap.numLiveObjects + numberHeap.numLiveObjects - m_globalData->smallStrings.count();
}
template <HeapType heapType>
static void addToStatistics(Heap::Statistics& statistics, const CollectorHeap& heap)
{
    typedef HeapConstants<heapType> HC;
    for (size_t i = 0; i < heap.usedBlocks; ++i) {
        if (heap.blocks[i]) {
            statistics.size += BLOCK_SIZE;
            statistics.free += (HC::cellsPerBlock - heap.blocks[i]->usedCells) * HC::cellSize;
        }
    }
}
Heap::Statistics Heap::statistics() const
{
    Statistics statistics = { 0, 0 };
    JSC::addToStatistics<PrimaryHeap>(statistics, primaryHeap);
    JSC::addToStatistics<NumberHeap>(statistics, numberHeap);
    return statistics;
}
size_t Heap::globalObjectCount()
{
    size_t count = 0;
    if (JSGlobalObject* head = m_globalData->head) {
        JSGlobalObject* o = head;
        do {
            ++count;
            o = o->next();
        } while (o != head);
    }
    return count;
}
size_t Heap::protectedGlobalObjectCount()
{
    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    size_t count = 0;
    if (JSGlobalObject* head = m_globalData->head) {
        JSGlobalObject* o = head;
        do {
            if (m_protectedValues.contains(o))
                ++count;
            o = o->next();
        } while (o != head);
    }

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();

    return count;
}
size_t Heap::protectedObjectCount()
{
    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    size_t result = m_protectedValues.size();

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();

    return result;
}
static const char* typeName(JSCell* cell)
{
    if (cell->isString())
        return "string";
    if (cell->isNumber())
        return "number";
    if (cell->isGetterSetter())
        return "gettersetter";
    ASSERT(cell->isObject());
    const ClassInfo* info = static_cast<JSObject*>(cell)->classInfo();
    return info ? info->className : "Object";
}
HashCountedSet<const char*>* Heap::protectedObjectTypeCounts()
{
    HashCountedSet<const char*>* counts = new HashCountedSet<const char*>;

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
        counts->add(typeName(it->first));

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();

    return counts;
}
bool Heap::isBusy()
{
    return (primaryHeap.operationInProgress != NoOperation) | (numberHeap.operationInProgress != NoOperation);
}
Heap::iterator Heap::primaryHeapBegin()
{
    return iterator(primaryHeap.blocks, primaryHeap.blocks + primaryHeap.usedBlocks);
}

Heap::iterator Heap::primaryHeapEnd()
{
    return iterator(primaryHeap.blocks + primaryHeap.usedBlocks, primaryHeap.blocks + primaryHeap.usedBlocks);
}

} // namespace JSC