/*
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
22 #include "Collector.h"
25 #include "CallFrame.h"
26 #include "CollectorHeapIterator.h"
27 #include "Interpreter.h"
28 #include "JSGlobalObject.h"
37 #include <wtf/FastMalloc.h>
38 #include <wtf/HashCountedSet.h>
39 #include <wtf/UnusedParam.h>
#if PLATFORM(DARWIN)

#include <mach/mach_port.h>
#include <mach/mach_init.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <mach/vm_map.h>

#elif PLATFORM(WIN_OS)
#if HAVE(PTHREAD_NP_H)
#include <pthread_np.h>
#endif
#define DEBUG_COLLECTOR 0
#define COLLECT_ON_EVERY_ALLOCATION 0
using std::max;

namespace JSC {

const size_t SPARE_EMPTY_BLOCKS = 2;
const size_t GROWTH_FACTOR = 2;
const size_t LOW_WATER_FACTOR = 4;
const size_t ALLOCATIONS_PER_COLLECTION = 4000;
// This value has to be a macro to be used in max() without introducing
// a PIC branch in Mach-O binaries, see <rdar://problem/5971391>.
#define MIN_ARRAY_SIZE (static_cast<size_t>(14))
static void freeHeap(CollectorHeap*);
#if ENABLE(JSC_MULTIPLE_THREADS)

#if PLATFORM(DARWIN)
typedef mach_port_t PlatformThread;
#elif PLATFORM(WIN_OS)
struct PlatformThread {
    PlatformThread(DWORD _id, HANDLE _handle) : id(_id), handle(_handle) {}
    DWORD id;
    HANDLE handle;
};
#endif
class Heap::Thread {
public:
    Thread(pthread_t pthread, const PlatformThread& platThread, void* base)
        : posixThread(pthread)
        , platformThread(platThread)
        , stackBase(base)
    {
    }

    Thread* next;
    pthread_t posixThread;
    PlatformThread platformThread;
    void* stackBase;
};
Heap::Heap(JSGlobalData* globalData)
    : m_markListSet(0)
#if ENABLE(JSC_MULTIPLE_THREADS)
    , m_registeredThreads(0)
    , m_currentThreadRegistrar(0)
#endif
    , m_globalData(globalData)
{
    memset(&primaryHeap, 0, sizeof(CollectorHeap));
    memset(&numberHeap, 0, sizeof(CollectorHeap));
}
Heap::~Heap()
{
    // The destroy function must already have been called, so assert this.
    ASSERT(!m_globalData);
}
void Heap::destroy()
{
    // The global object is not GC protected at this point, so sweeping may delete it
    // (and thus the global data) before other objects that may use the global data.
    RefPtr<JSGlobalData> protect(m_globalData);

    delete m_markListSet;

    sweep<PrimaryHeap>();
    // No need to sweep number heap, because the JSNumber destructor doesn't do anything.

    ASSERT(!primaryHeap.numLiveObjects);

    freeHeap(&primaryHeap);
    freeHeap(&numberHeap);
#if ENABLE(JSC_MULTIPLE_THREADS)
    if (m_currentThreadRegistrar) {
        int error = pthread_key_delete(m_currentThreadRegistrar);
        ASSERT_UNUSED(error, !error);
    }

    MutexLocker registeredThreadsLock(m_registeredThreadsMutex);
    for (Heap::Thread* t = m_registeredThreads; t;) {
        Heap::Thread* next = t->next;
        delete t;
        t = next;
    }
    m_registeredThreads = 0;
#endif

    m_globalData = 0;
}
template <HeapType heapType>
static NEVER_INLINE CollectorBlock* allocateBlock()
{
#if PLATFORM(DARWIN)
    vm_address_t address = 0;
    // FIXME: tag the region as a JavaScriptCore heap when we get a registered VM tag: <rdar://problem/6054788>.
    vm_map(current_task(), &address, BLOCK_SIZE, BLOCK_OFFSET_MASK, VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
#elif PLATFORM(SYMBIAN)
    // no memory map in symbian, need to hack with fastMalloc
    void* address = fastMalloc(BLOCK_SIZE);
    memset(reinterpret_cast<void*>(address), 0, BLOCK_SIZE);
#elif PLATFORM(WIN_OS)
    // windows virtual address granularity is naturally 64k
    LPVOID address = VirtualAlloc(NULL, BLOCK_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#elif HAVE(POSIX_MEMALIGN)
    void* address;
    posix_memalign(&address, BLOCK_SIZE, BLOCK_SIZE);
    memset(address, 0, BLOCK_SIZE);
#else

#if ENABLE(JSC_MULTIPLE_THREADS)
#error Need to initialize pagesize safely.
#endif
    static size_t pagesize = getpagesize();

    size_t extra = 0;
    if (BLOCK_SIZE > pagesize)
        extra = BLOCK_SIZE - pagesize;
    void* mmapResult = mmap(NULL, BLOCK_SIZE + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
    uintptr_t address = reinterpret_cast<uintptr_t>(mmapResult);
    size_t adjust = 0;
    if ((address & BLOCK_OFFSET_MASK) != 0)
        adjust = BLOCK_SIZE - (address & BLOCK_OFFSET_MASK);

    if (adjust > 0)
        munmap(reinterpret_cast<char*>(address), adjust);

    if (extra > adjust)
        munmap(reinterpret_cast<char*>(address + adjust + BLOCK_SIZE), extra - adjust);

    address += adjust;
    memset(reinterpret_cast<void*>(address), 0, BLOCK_SIZE);
#endif
    reinterpret_cast<CollectorBlock*>(address)->type = heapType;
    return reinterpret_cast<CollectorBlock*>(address);
}
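// Illustration of the alignment trim above (example numbers, assuming the 64KB
// BLOCK_SIZE with BLOCK_OFFSET_MASK = BLOCK_SIZE - 1 from Collector.h, and 4KB
// pages, so extra = 0xF000): if mmap returns 0x26000, then
//     adjust = 0x10000 - (0x26000 & 0xFFFF) = 0xA000,
// so the misaligned head [0x26000, 0x30000) is unmapped, the aligned block
// occupies [0x30000, 0x40000), and the leftover tail of extra - adjust = 0x5000
// bytes starting at 0x40000 is unmapped as well.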
static void freeBlock(CollectorBlock* block)
{
#if PLATFORM(DARWIN)
    vm_deallocate(current_task(), reinterpret_cast<vm_address_t>(block), BLOCK_SIZE);
#elif PLATFORM(SYMBIAN)
    fastFree(block);
#elif PLATFORM(WIN_OS)
    VirtualFree(block, 0, MEM_RELEASE);
#elif HAVE(POSIX_MEMALIGN)
    free(block);
#else
    munmap(reinterpret_cast<char*>(block), BLOCK_SIZE);
#endif
}
static void freeHeap(CollectorHeap* heap)
{
    for (size_t i = 0; i < heap->usedBlocks; ++i)
        if (heap->blocks[i])
            freeBlock(heap->blocks[i]);
    fastFree(heap->blocks);
    memset(heap, 0, sizeof(CollectorHeap));
}
void Heap::recordExtraCost(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.
    // NOTE: we target the primaryHeap unconditionally as JSNumber doesn't modify cost

    primaryHeap.extraCost += cost;
}
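// Worked example of the heuristic above (illustrative values, not from the
// original source): a single cell that pins a 10MB buffer counts as just one of
// the ALLOCATIONS_PER_COLLECTION (4000) allocations, so on its own it would
// never bring a collection closer. After recordExtraCost(10 * 1024 * 1024),
// the next heapAllocate sees newCost = numNewObjects + extraCost far above both
// collection thresholds and collects promptly. Because the extra cost is only
// tracked until the next GC (per the comment above), a large object that
// survives a collection stops inflating the trigger.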
template <HeapType heapType> ALWAYS_INLINE void* Heap::heapAllocate(size_t s)
{
    typedef typename HeapConstants<heapType>::Block Block;
    typedef typename HeapConstants<heapType>::Cell Cell;
    CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;
    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());
    ASSERT_UNUSED(s, s <= HeapConstants<heapType>::cellSize);

    ASSERT(heap.operationInProgress == NoOperation);
    ASSERT(heapType == PrimaryHeap || heap.extraCost == 0);
    // FIXME: If another global variable access here doesn't hurt performance
    // too much, we could CRASH() in NDEBUG builds, which could help ensure we
    // don't spend any time debugging cases where we allocate inside an object's
    // deallocation code.
#if COLLECT_ON_EVERY_ALLOCATION
    collect();
#endif

    size_t numLiveObjects = heap.numLiveObjects;
    size_t usedBlocks = heap.usedBlocks;
    size_t i = heap.firstBlockWithPossibleSpace;
    // if we have a huge amount of extra cost, we'll try to collect even if we still have
    // free cells left.
    if (heapType == PrimaryHeap && heap.extraCost > ALLOCATIONS_PER_COLLECTION) {
        size_t numLiveObjectsAtLastCollect = heap.numLiveObjectsAtLastCollect;
        size_t numNewObjects = numLiveObjects - numLiveObjectsAtLastCollect;
        const size_t newCost = numNewObjects + heap.extraCost;
        if (newCost >= ALLOCATIONS_PER_COLLECTION && newCost >= numLiveObjectsAtLastCollect)
            goto collect;
    }

    ASSERT(heap.operationInProgress == NoOperation);
#ifndef NDEBUG
    // FIXME: Consider doing this in NDEBUG builds too (see comment above).
    heap.operationInProgress = Allocation;
#endif
scan:
    Block* targetBlock;
    size_t targetBlockUsedCells;
    if (i != usedBlocks) {
        targetBlock = reinterpret_cast<Block*>(heap.blocks[i]);
        targetBlockUsedCells = targetBlock->usedCells;
        ASSERT(targetBlockUsedCells <= HeapConstants<heapType>::cellsPerBlock);
        while (targetBlockUsedCells == HeapConstants<heapType>::cellsPerBlock) {
            if (++i == usedBlocks)
                goto collect;
            targetBlock = reinterpret_cast<Block*>(heap.blocks[i]);
            targetBlockUsedCells = targetBlock->usedCells;
            ASSERT(targetBlockUsedCells <= HeapConstants<heapType>::cellsPerBlock);
        }
        heap.firstBlockWithPossibleSpace = i;
    } else {
collect:
        size_t numLiveObjectsAtLastCollect = heap.numLiveObjectsAtLastCollect;
        size_t numNewObjects = numLiveObjects - numLiveObjectsAtLastCollect;
        const size_t newCost = numNewObjects + heap.extraCost;

        if (newCost >= ALLOCATIONS_PER_COLLECTION && newCost >= numLiveObjectsAtLastCollect) {
#ifndef NDEBUG
            heap.operationInProgress = NoOperation;
#endif
            bool collected = collect();
#ifndef NDEBUG
            heap.operationInProgress = Allocation;
#endif
            if (collected) {
                numLiveObjects = heap.numLiveObjects;
                usedBlocks = heap.usedBlocks;
                i = heap.firstBlockWithPossibleSpace;
                goto scan;
            }
        }
        // didn't find a block, and GC didn't reclaim anything, need to allocate a new block
        size_t numBlocks = heap.numBlocks;
        if (usedBlocks == numBlocks) {
            static const size_t maxNumBlocks = ULONG_MAX / sizeof(CollectorBlock*) / GROWTH_FACTOR;
            if (numBlocks > maxNumBlocks)
                CRASH();
            numBlocks = max(MIN_ARRAY_SIZE, numBlocks * GROWTH_FACTOR);
            heap.numBlocks = numBlocks;
            heap.blocks = static_cast<CollectorBlock**>(fastRealloc(heap.blocks, numBlocks * sizeof(CollectorBlock*)));
        }

        targetBlock = reinterpret_cast<Block*>(allocateBlock<heapType>());
        targetBlock->freeList = targetBlock->cells;
        targetBlock->heap = this;
        targetBlockUsedCells = 0;
        heap.blocks[usedBlocks] = reinterpret_cast<CollectorBlock*>(targetBlock);
        heap.usedBlocks = usedBlocks + 1;
        heap.firstBlockWithPossibleSpace = usedBlocks;
    }
    // find a free spot in the block and detach it from the free list
    Cell* newCell = targetBlock->freeList;

    // "next" field is a cell offset -- 0 means next cell, so a zeroed block is already initialized
    targetBlock->freeList = (newCell + 1) + newCell->u.freeCell.next;

    targetBlock->usedCells = static_cast<uint32_t>(targetBlockUsedCells + 1);
    heap.numLiveObjects = numLiveObjects + 1;

#ifndef NDEBUG
    // FIXME: Consider doing this in NDEBUG builds too (see comment above).
    heap.operationInProgress = NoOperation;
#endif

    return newCell;
}
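// Illustration of the free-list encoding above (example indices, not from the
// original source): u.freeCell.next holds the offset from the cell *after* this
// one to the true successor, so the pop is freeList = (newCell + 1) + next. A
// freshly zeroed block therefore needs no initialization: every next is 0 and
// the list implicitly runs cells[0], cells[1], cells[2], ... When sweep() later
// links, say, cells[7] in front of a list headed by cells[3], it stores
// next = &cells[3] - (&cells[7] + 1) = -5.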
void* Heap::allocate(size_t s)
{
    return heapAllocate<PrimaryHeap>(s);
}
void* Heap::allocateNumber(size_t s)
{
    return heapAllocate<NumberHeap>(s);
}
static inline void* currentThreadStackBase()
{
#if PLATFORM(DARWIN)
    pthread_t thread = pthread_self();
    return pthread_get_stackaddr_np(thread);
#elif PLATFORM(WIN_OS) && PLATFORM(X86) && COMPILER(MSVC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    NT_TIB* pTib;
    __asm {
        MOV EAX, FS:[18h]
        MOV pTib, EAX
    }
    return static_cast<void*>(pTib->StackBase);
#elif PLATFORM(WIN_OS) && PLATFORM(X86_64) && COMPILER(MSVC)
    PNT_TIB64 pTib = reinterpret_cast<PNT_TIB64>(NtCurrentTeb());
    return reinterpret_cast<void*>(pTib->StackBase);
#elif PLATFORM(WIN_OS) && PLATFORM(X86) && COMPILER(GCC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    NT_TIB* pTib;
    asm ( "movl %%fs:0x18, %0\n"
          : "=r" (pTib)
        );
    return static_cast<void*>(pTib->StackBase);
#elif PLATFORM(SOLARIS)
    stack_t s;
    thr_stksegment(&s);
    return s.ss_sp;
#elif PLATFORM(OPENBSD)
    pthread_t thread = pthread_self();
    stack_t stack;
    pthread_stackseg_np(thread, &stack);
    return stack.ss_sp;
#elif PLATFORM(UNIX)
    static void* stackBase = 0;
    static size_t stackSize = 0;
    static pthread_t stackThread;
    pthread_t thread = pthread_self();
    if (stackBase == 0 || thread != stackThread) {
        pthread_attr_t sattr;
        pthread_attr_init(&sattr);
#if HAVE(PTHREAD_NP_H) || PLATFORM(NETBSD)
        // e.g. on FreeBSD 5.4, neundorf@kde.org
        pthread_attr_get_np(thread, &sattr);
#else
        // FIXME: this function is non-portable; other POSIX systems may have different np alternatives
        pthread_getattr_np(thread, &sattr);
#endif
        int rc = pthread_attr_getstack(&sattr, &stackBase, &stackSize);
        (void)rc; // FIXME: Deal with error code somehow? Seems fatal.
        pthread_attr_destroy(&sattr);
        stackThread = thread;
    }
    return static_cast<char*>(stackBase) + stackSize;
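    // Note on the arithmetic above: pthread_attr_getstack() reports the lowest
    // addressable byte of the stack, but the conservative scanner wants the top,
    // since the stack grows downward on the architectures supported here; hence
    // base + size. With illustrative values stackBase = 0xb6f00000 and
    // stackSize = 0x100000, all live frames sit below the returned 0xb7000000.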
#elif PLATFORM(SYMBIAN)
    static void* stackBase = 0;
    if (stackBase == 0) {
        TThreadStackInfo info;
        RThread thread;
        thread.StackInfo(info);
        stackBase = (void*)info.iBase;
    }
    return (void*)stackBase;
#else
#error Need a way to get the stack base on this platform
#endif
}
#if ENABLE(JSC_MULTIPLE_THREADS)

static inline PlatformThread getCurrentPlatformThread()
{
#if PLATFORM(DARWIN)
    return pthread_mach_thread_np(pthread_self());
#elif PLATFORM(WIN_OS)
    HANDLE threadHandle = pthread_getw32threadhandle_np(pthread_self());
    return PlatformThread(GetCurrentThreadId(), threadHandle);
#endif
}
void Heap::makeUsableFromMultipleThreads()
{
    if (m_currentThreadRegistrar)
        return;

    int error = pthread_key_create(&m_currentThreadRegistrar, unregisterThread);
    if (error)
        CRASH();
}
void Heap::registerThread()
{
    if (!m_currentThreadRegistrar || pthread_getspecific(m_currentThreadRegistrar))
        return;

    pthread_setspecific(m_currentThreadRegistrar, this);
    Heap::Thread* thread = new Heap::Thread(pthread_self(), getCurrentPlatformThread(), currentThreadStackBase());

    MutexLocker lock(m_registeredThreadsMutex);

    thread->next = m_registeredThreads;
    m_registeredThreads = thread;
}
void Heap::unregisterThread(void* p)
{
    if (p)
        static_cast<Heap*>(p)->unregisterThread();
}
void Heap::unregisterThread()
{
    pthread_t currentPosixThread = pthread_self();

    MutexLocker lock(m_registeredThreadsMutex);

    if (pthread_equal(currentPosixThread, m_registeredThreads->posixThread)) {
        Thread* t = m_registeredThreads;
        m_registeredThreads = m_registeredThreads->next;
        delete t;
    } else {
        Heap::Thread* last = m_registeredThreads;
        Heap::Thread* t;
        for (t = m_registeredThreads->next; t; t = t->next) {
            if (pthread_equal(t->posixThread, currentPosixThread)) {
                last->next = t->next;
                break;
            }
            last = t;
        }
        ASSERT(t); // If t is NULL, we never found ourselves in the list.
        delete t;
    }
}
#else // ENABLE(JSC_MULTIPLE_THREADS)

void Heap::registerThread()
{
}

#endif
#define IS_POINTER_ALIGNED(p) (((intptr_t)(p) & (sizeof(char*) - 1)) == 0)

// cell size needs to be a power of two for this to be valid
#define IS_HALF_CELL_ALIGNED(p) (((intptr_t)(p) & (CELL_MASK >> 1)) == 0)
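// Illustration (example values, not from the original source): on a 32-bit
// build, IS_POINTER_ALIGNED(0x1002) is false because the low two bits are set,
// so such a word can be skipped outright. IS_HALF_CELL_ALIGNED masks with
// CELL_MASK >> 1, accepting only addresses at half-cell granularity; since the
// cell size is a power of two, any word failing this cheap test cannot be one
// of the cell pointers the conservative scan cares about.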
void Heap::markConservatively(void* start, void* end)
{
    if (start > end) {
        void* tmp = start;
        start = end;
        end = tmp;
    }

    ASSERT((static_cast<char*>(end) - static_cast<char*>(start)) < 0x1000000);
    ASSERT(IS_POINTER_ALIGNED(start));
    ASSERT(IS_POINTER_ALIGNED(end));

    char** p = static_cast<char**>(start);
    char** e = static_cast<char**>(end);
    size_t usedPrimaryBlocks = primaryHeap.usedBlocks;
    size_t usedNumberBlocks = numberHeap.usedBlocks;
    CollectorBlock** primaryBlocks = primaryHeap.blocks;
    CollectorBlock** numberBlocks = numberHeap.blocks;

    const size_t lastCellOffset = sizeof(CollectorCell) * (CELLS_PER_BLOCK - 1);
    while (p != e) {
        char* x = *p++;
        if (IS_HALF_CELL_ALIGNED(x) && x) {
            uintptr_t xAsBits = reinterpret_cast<uintptr_t>(x);
            xAsBits &= CELL_ALIGN_MASK;
            uintptr_t offset = xAsBits & BLOCK_OFFSET_MASK;
            CollectorBlock* blockAddr = reinterpret_cast<CollectorBlock*>(xAsBits - offset);
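            // Illustration (example values, assuming the 64KB BLOCK_SIZE from
            // Collector.h): for a cell-aligned candidate x = 0x2354e0,
            // offset = xAsBits & BLOCK_OFFSET_MASK = 0x54e0 and
            // blockAddr = 0x2354e0 - 0x54e0 = 0x230000. The loops below then only
            // need to compare blockAddr against each allocated block, while
            // offset <= lastCellOffset rejects addresses past the last whole cell.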
            // Mark the number heap; we can mark these Cells directly to avoid the virtual call cost
            for (size_t block = 0; block < usedNumberBlocks; block++) {
                if ((numberBlocks[block] == blockAddr) & (offset <= lastCellOffset)) {
                    Heap::markCell(reinterpret_cast<JSCell*>(xAsBits));
                    goto endMarkLoop;
                }
            }
            // Mark the primary heap
            for (size_t block = 0; block < usedPrimaryBlocks; block++) {
                if ((primaryBlocks[block] == blockAddr) & (offset <= lastCellOffset)) {
                    if (reinterpret_cast<CollectorCell*>(xAsBits)->u.freeCell.zeroIfFree != 0) {
                        JSCell* imp = reinterpret_cast<JSCell*>(xAsBits);
                        if (!imp->marked())
                            imp->mark();
                    }
                    break;
                }
            }
        endMarkLoop:
            ;
        }
    }
}
void NEVER_INLINE Heap::markCurrentThreadConservativelyInternal()
{
    void* dummy;
    void* stackPointer = &dummy;
    void* stackBase = currentThreadStackBase();
    markConservatively(stackPointer, stackBase);
}
void Heap::markCurrentThreadConservatively()
{
    // setjmp forces volatile registers onto the stack
    jmp_buf registers;
#if COMPILER(MSVC)
#pragma warning(push)
#pragma warning(disable: 4611)
#endif
    setjmp(registers);
#if COMPILER(MSVC)
#pragma warning(pop)
#endif

    markCurrentThreadConservativelyInternal();
}
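// Why the setjmp() above works: the C runtime must save the callee-saved
// registers into the stack-allocated jmp_buf, so a GC pointer living only in a
// register becomes visible to the conservative stack scan that follows. The
// idiom in isolation (sketch; scanRange stands in for markConservatively):
//
//     jmp_buf registers;                                  // on the stack
//     setjmp(registers);                                  // spills registers
//     scanRange(&registers, currentThreadStackBase());    // scan covers them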
#if ENABLE(JSC_MULTIPLE_THREADS)

static inline void suspendThread(const PlatformThread& platformThread)
{
#if PLATFORM(DARWIN)
    thread_suspend(platformThread);
#elif PLATFORM(WIN_OS)
    SuspendThread(platformThread.handle);
#else
#error Need a way to suspend threads on this platform
#endif
}
static inline void resumeThread(const PlatformThread& platformThread)
{
#if PLATFORM(DARWIN)
    thread_resume(platformThread);
#elif PLATFORM(WIN_OS)
    ResumeThread(platformThread.handle);
#else
#error Need a way to resume threads on this platform
#endif
}
typedef unsigned long usword_t; // word size, assumed to be either 32 or 64 bit

#if PLATFORM(DARWIN)

#if PLATFORM(X86)
typedef i386_thread_state_t PlatformThreadRegisters;
#elif PLATFORM(X86_64)
typedef x86_thread_state64_t PlatformThreadRegisters;
#elif PLATFORM(PPC)
typedef ppc_thread_state_t PlatformThreadRegisters;
#elif PLATFORM(PPC64)
typedef ppc_thread_state64_t PlatformThreadRegisters;
#elif PLATFORM(ARM)
typedef arm_thread_state_t PlatformThreadRegisters;
#else
#error Unknown Architecture
#endif

#elif PLATFORM(WIN_OS) && PLATFORM(X86)
typedef CONTEXT PlatformThreadRegisters;
#else
#error Need a thread register struct for this platform
#endif
static size_t getPlatformThreadRegisters(const PlatformThread& platformThread, PlatformThreadRegisters& regs)
{
#if PLATFORM(DARWIN)

#if PLATFORM(X86)
    unsigned user_count = sizeof(regs) / sizeof(int);
    thread_state_flavor_t flavor = i386_THREAD_STATE;
#elif PLATFORM(X86_64)
    unsigned user_count = x86_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = x86_THREAD_STATE64;
#elif PLATFORM(PPC)
    unsigned user_count = PPC_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE;
#elif PLATFORM(PPC64)
    unsigned user_count = PPC_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE64;
#elif PLATFORM(ARM)
    unsigned user_count = ARM_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = ARM_THREAD_STATE;
#else
#error Unknown Architecture
#endif
    kern_return_t result = thread_get_state(platformThread, flavor, (thread_state_t)&regs, &user_count);
    if (result != KERN_SUCCESS) {
        WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION,
                            "JavaScript garbage collection failed because thread_get_state returned an error (%d). This is probably the result of running inside Rosetta, which is not supported.", result);
        CRASH();
    }
    return user_count * sizeof(usword_t);
// end PLATFORM(DARWIN)
#elif PLATFORM(WIN_OS) && PLATFORM(X86)
    regs.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL | CONTEXT_SEGMENTS;
    GetThreadContext(platformThread.handle, &regs);
    return sizeof(CONTEXT);
#else
#error Need a way to get thread registers on this platform
#endif
}
static inline void* otherThreadStackPointer(const PlatformThreadRegisters& regs)
{
#if PLATFORM(DARWIN)

#if __DARWIN_UNIX03

#if PLATFORM(X86)
    return reinterpret_cast<void*>(regs.__esp);
#elif PLATFORM(X86_64)
    return reinterpret_cast<void*>(regs.__rsp);
#elif PLATFORM(PPC) || PLATFORM(PPC64)
    return reinterpret_cast<void*>(regs.__r1);
#elif PLATFORM(ARM)
    return reinterpret_cast<void*>(regs.__sp);
#else
#error Unknown Architecture
#endif

#else // !__DARWIN_UNIX03

#if PLATFORM(X86)
    return reinterpret_cast<void*>(regs.esp);
#elif PLATFORM(X86_64)
    return reinterpret_cast<void*>(regs.rsp);
#elif (PLATFORM(PPC) || PLATFORM(PPC64))
    return reinterpret_cast<void*>(regs.r1);
#else
#error Unknown Architecture
#endif

#endif // __DARWIN_UNIX03

// end PLATFORM(DARWIN)
#elif PLATFORM(X86) && PLATFORM(WIN_OS)
    return reinterpret_cast<void*>((uintptr_t) regs.Esp);
#else
#error Need a way to get the stack pointer for another thread on this platform
#endif
}
void Heap::markOtherThreadConservatively(Thread* thread)
{
    suspendThread(thread->platformThread);

    PlatformThreadRegisters regs;
    size_t regSize = getPlatformThreadRegisters(thread->platformThread, regs);

    // mark the thread's registers
    markConservatively(static_cast<void*>(&regs), static_cast<void*>(reinterpret_cast<char*>(&regs) + regSize));

    void* stackPointer = otherThreadStackPointer(regs);
    markConservatively(stackPointer, thread->stackBase);

    resumeThread(thread->platformThread);
}

#endif
void Heap::markStackObjectsConservatively()
{
    markCurrentThreadConservatively();

#if ENABLE(JSC_MULTIPLE_THREADS)

    if (m_currentThreadRegistrar) {

        MutexLocker lock(m_registeredThreadsMutex);

        // Forbid malloc during the mark phase. Marking a thread suspends it, so
        // a malloc inside mark() would risk a deadlock with a thread that had been
        // suspended while holding the malloc lock.
        fastMallocForbid();

        // It is safe to access the registeredThreads list, because we earlier asserted that locks are being held,
        // and since this is a shared heap, they are real locks.
        for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
            if (!pthread_equal(thread->posixThread, pthread_self()))
                markOtherThreadConservatively(thread);
        }

        fastMallocAllow();
    }
#endif
}
void Heap::setGCProtectNeedsLocking()
{
    // Most clients do not need to call this, with the notable exception of WebCore.
    // Clients that use shared heap have JSLock protection, while others are supposed
    // to do explicit locking. WebCore violates this contract in Database code,
    // which calls gcUnprotect from a secondary thread.
    if (!m_protectedValuesMutex)
        m_protectedValuesMutex.set(new Mutex);
}
void Heap::protect(JSValuePtr k)
{
    ASSERT(k);
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance);

    if (JSImmediate::isImmediate(k))
        return;

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    m_protectedValues.add(k.asCell());

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();
}
void Heap::unprotect(JSValuePtr k)
{
    ASSERT(k);
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance);

    if (JSImmediate::isImmediate(k))
        return;

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    m_protectedValues.remove(k.asCell());

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();
}
Heap* Heap::heap(JSValuePtr v)
{
    if (JSImmediate::isImmediate(v))
        return 0;
    return Heap::cellBlock(v.asCell())->heap;
}
void Heap::markProtectedObjects()
{
    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it) {
        JSCell* val = it->first;
        if (!val->marked())
            val->mark();
    }

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();
}
template <HeapType heapType> size_t Heap::sweep()
{
    typedef typename HeapConstants<heapType>::Block Block;
    typedef typename HeapConstants<heapType>::Cell Cell;
    // SWEEP: delete everything with a zero refcount (garbage) and unmark everything else
    CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;

    size_t emptyBlocks = 0;
    size_t numLiveObjects = heap.numLiveObjects;

    for (size_t block = 0; block < heap.usedBlocks; block++) {
        Block* curBlock = reinterpret_cast<Block*>(heap.blocks[block]);

        size_t usedCells = curBlock->usedCells;
        Cell* freeList = curBlock->freeList;
        if (usedCells == HeapConstants<heapType>::cellsPerBlock) {
            // special case with a block where all cells are used -- testing indicates this happens often
            for (size_t i = 0; i < HeapConstants<heapType>::cellsPerBlock; i++) {
                if (!curBlock->marked.get(i >> HeapConstants<heapType>::bitmapShift)) {
                    Cell* cell = curBlock->cells + i;

                    if (heapType != NumberHeap) {
                        JSCell* imp = reinterpret_cast<JSCell*>(cell);
                        // special case for allocated but uninitialized object
                        // (We don't need this check earlier because nothing prior to this point
                        // assumes the object has a valid vptr.)
                        if (cell->u.freeCell.zeroIfFree == 0)
                            continue;
                        imp->~JSCell();
                    }

                    --usedCells;
                    --numLiveObjects;

                    // put cell on the free list
                    cell->u.freeCell.zeroIfFree = 0;
                    cell->u.freeCell.next = freeList - (cell + 1);
                    freeList = cell;
                }
            }
        } else {
            size_t minimumCellsToProcess = usedCells;
            for (size_t i = 0; (i < minimumCellsToProcess) & (i < HeapConstants<heapType>::cellsPerBlock); i++) {
                Cell* cell = curBlock->cells + i;
                if (cell->u.freeCell.zeroIfFree == 0) {
                    ++minimumCellsToProcess;
                } else {
                    if (!curBlock->marked.get(i >> HeapConstants<heapType>::bitmapShift)) {
                        if (heapType != NumberHeap) {
                            JSCell* imp = reinterpret_cast<JSCell*>(cell);
                            imp->~JSCell();
                        }
                        --usedCells;
                        --numLiveObjects;

                        // put cell on the free list
                        cell->u.freeCell.zeroIfFree = 0;
                        cell->u.freeCell.next = freeList - (cell + 1);
                        freeList = cell;
                    }
                }
            }
        }
        curBlock->usedCells = static_cast<uint32_t>(usedCells);
        curBlock->freeList = freeList;
        curBlock->marked.clearAll();

        if (usedCells == 0) {
            emptyBlocks++;
            if (emptyBlocks > SPARE_EMPTY_BLOCKS) {
#if !DEBUG_COLLECTOR
                freeBlock(reinterpret_cast<CollectorBlock*>(curBlock));
#endif
                // swap with the last block so we compact as we go
                heap.blocks[block] = heap.blocks[heap.usedBlocks - 1];
                heap.usedBlocks--;
                block--; // Don't move forward a step in this case

                if (heap.numBlocks > MIN_ARRAY_SIZE && heap.usedBlocks < heap.numBlocks / LOW_WATER_FACTOR) {
                    heap.numBlocks = heap.numBlocks / GROWTH_FACTOR;
                    heap.blocks = static_cast<CollectorBlock**>(fastRealloc(heap.blocks, heap.numBlocks * sizeof(CollectorBlock*)));
                }
            }
        }
    }
    if (heap.numLiveObjects != numLiveObjects)
        heap.firstBlockWithPossibleSpace = 0;

    heap.numLiveObjects = numLiveObjects;
    heap.numLiveObjectsAtLastCollect = numLiveObjects;
    heap.extraCost = 0;
    return numLiveObjects;
}
bool Heap::collect()
{
#ifndef NDEBUG
    if (m_globalData->isSharedInstance) {
        ASSERT(JSLock::lockCount() > 0);
        ASSERT(JSLock::currentThreadIsHoldingLock());
    }
#endif

    ASSERT((primaryHeap.operationInProgress == NoOperation) | (numberHeap.operationInProgress == NoOperation));
    if ((primaryHeap.operationInProgress != NoOperation) | (numberHeap.operationInProgress != NoOperation))
        CRASH();
    JAVASCRIPTCORE_GC_BEGIN();
    primaryHeap.operationInProgress = Collection;
    numberHeap.operationInProgress = Collection;
    // MARK: first mark all referenced objects recursively starting out from the set of root objects

    markStackObjectsConservatively();
    markProtectedObjects();
    if (m_markListSet && m_markListSet->size())
        ArgList::markLists(*m_markListSet);
    if (m_globalData->exception && !m_globalData->exception.marked())
        m_globalData->exception.mark();
    m_globalData->interpreter->registerFile().markCallFrames(this);
    m_globalData->smallStrings.mark();
    if (m_globalData->scopeNodeBeingReparsed)
        m_globalData->scopeNodeBeingReparsed->mark();

    JAVASCRIPTCORE_GC_MARKED();
    size_t originalLiveObjects = primaryHeap.numLiveObjects + numberHeap.numLiveObjects;
    size_t numLiveObjects = sweep<PrimaryHeap>();
    numLiveObjects += sweep<NumberHeap>();

    primaryHeap.operationInProgress = NoOperation;
    numberHeap.operationInProgress = NoOperation;
    JAVASCRIPTCORE_GC_END(originalLiveObjects, numLiveObjects);

    return numLiveObjects < originalLiveObjects;
}
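// Note on the return value: it reports whether the collection actually freed
// anything; heapAllocate() uses it to decide between rescanning the existing
// blocks for a free cell and growing the heap with a fresh block.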
size_t Heap::objectCount()
{
    return primaryHeap.numLiveObjects + numberHeap.numLiveObjects - m_globalData->smallStrings.count();
}
template <HeapType heapType>
static void addToStatistics(Heap::Statistics& statistics, const CollectorHeap& heap)
{
    typedef HeapConstants<heapType> HC;
    for (size_t i = 0; i < heap.usedBlocks; ++i) {
        if (heap.blocks[i]) {
            statistics.size += BLOCK_SIZE;
            statistics.free += (HC::cellsPerBlock - heap.blocks[i]->usedCells) * HC::cellSize;
        }
    }
}
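// Reading the numbers (illustrative): each allocated block contributes a full
// BLOCK_SIZE to statistics.size, while statistics.free counts unused cells at
// cell granularity. E.g. a block with 100 of its cells in use adds BLOCK_SIZE
// to size and (HC::cellsPerBlock - 100) * HC::cellSize to free.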
Heap::Statistics Heap::statistics() const
{
    Statistics statistics = { 0, 0 };
    JSC::addToStatistics<PrimaryHeap>(statistics, primaryHeap);
    JSC::addToStatistics<NumberHeap>(statistics, numberHeap);
    return statistics;
}
size_t Heap::globalObjectCount()
{
    size_t count = 0;
    if (JSGlobalObject* head = m_globalData->head) {
        JSGlobalObject* o = head;
        do {
            ++count;
            o = o->next();
        } while (o != head);
    }
    return count;
}
size_t Heap::protectedGlobalObjectCount()
{
    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    size_t count = 0;
    if (JSGlobalObject* head = m_globalData->head) {
        JSGlobalObject* o = head;
        do {
            if (m_protectedValues.contains(o))
                ++count;
            o = o->next();
        } while (o != head);
    }

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();

    return count;
}
size_t Heap::protectedObjectCount()
{
    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    size_t result = m_protectedValues.size();

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();

    return result;
}
static const char* typeName(JSCell* cell)
{
    if (cell->isString())
        return "string";
    if (cell->isNumber())
        return "number";
    if (cell->isGetterSetter())
        return "gettersetter";
    ASSERT(cell->isObject());
    const ClassInfo* info = static_cast<JSObject*>(cell)->classInfo();
    return info ? info->className : "Object";
}
HashCountedSet<const char*>* Heap::protectedObjectTypeCounts()
{
    HashCountedSet<const char*>* counts = new HashCountedSet<const char*>;

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
        counts->add(typeName(it->first));

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();

    return counts;
}
bool Heap::isBusy()
{
    return (primaryHeap.operationInProgress != NoOperation) | (numberHeap.operationInProgress != NoOperation);
}
Heap::iterator Heap::primaryHeapBegin()
{
    return iterator(primaryHeap.blocks, primaryHeap.blocks + primaryHeap.usedBlocks);
}
Heap::iterator Heap::primaryHeapEnd()
{
    return iterator(primaryHeap.blocks + primaryHeap.usedBlocks, primaryHeap.blocks + primaryHeap.usedBlocks);
}

} // namespace JSC