// -*- mode: c++; c-basic-offset: 4 -*-
/*
 * Copyright (C) 2003, 2004, 2005, 2006, 2007 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA
 */
#include "collector.h"

#include "ExecState.h"
#include "JSGlobalObject.h"
#include <algorithm>
#include <setjmp.h>
#include <wtf/FastMalloc.h>
#include <wtf/HashCountedSet.h>
#include <wtf/UnusedParam.h>

#if USE(MULTIPLE_THREADS)
#include <pthread.h>
#endif

#if PLATFORM(DARWIN)

#include <mach/mach_port.h>
#include <mach/mach_init.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <mach/vm_map.h>

#include "CollectorHeapIntrospector.h"

#elif PLATFORM(WIN_OS)

#include <windows.h>

#elif PLATFORM(UNIX)

#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

#if PLATFORM(SOLARIS)
#include <thread.h>
#endif

#if HAVE(PTHREAD_NP_H)
#include <pthread_np.h>
#else
#include <pthread.h>
#endif

#endif
#define DEBUG_COLLECTOR 0

// Tunable collector parameters.
// Keep up to this many completely-empty blocks around before returning them to the OS.
const size_t SPARE_EMPTY_BLOCKS = 2;
// Minimum capacity of the block-pointer array.
const size_t MIN_ARRAY_SIZE = 14;
// Grow/shrink the block-pointer array by this factor.
const size_t GROWTH_FACTOR = 2;
// Shrink the block array when fewer than 1/LOW_WATER_FACTOR of it is in use.
const size_t LOW_WATER_FACTOR = 4;
// Trigger a collection after roughly this many allocations since the last one.
const size_t ALLOCATIONS_PER_COLLECTION = 4000;
// Tracks what the heap is currently doing, to catch reentrant
// allocation/collection in debug builds.
enum OperationInProgress { NoOperation, Allocation, Collection };
90 struct CollectorHeap
{
91 CollectorBlock
** blocks
;
94 size_t firstBlockWithPossibleSpace
;
96 size_t numLiveObjects
;
97 size_t numLiveObjectsAtLastCollect
;
100 OperationInProgress operationInProgress
;
103 static CollectorHeap primaryHeap
= { 0, 0, 0, 0, 0, 0, 0, NoOperation
};
104 static CollectorHeap numberHeap
= { 0, 0, 0, 0, 0, 0, 0, NoOperation
};
106 // FIXME: I don't think this needs to be a static data member of the Collector class.
107 // Just a private global like "heap" above would be fine.
108 size_t Collector::mainThreadOnlyObjectCount
= 0;
110 static CollectorBlock
* allocateBlock()
113 vm_address_t address
= 0;
114 vm_map(current_task(), &address
, BLOCK_SIZE
, BLOCK_OFFSET_MASK
, VM_FLAGS_ANYWHERE
, MEMORY_OBJECT_NULL
, 0, FALSE
, VM_PROT_DEFAULT
, VM_PROT_DEFAULT
, VM_INHERIT_DEFAULT
);
115 #elif PLATFORM(WIN_OS)
116 // windows virtual address granularity is naturally 64k
117 LPVOID address
= VirtualAlloc(NULL
, BLOCK_SIZE
, MEM_COMMIT
| MEM_RESERVE
, PAGE_READWRITE
);
118 #elif HAVE(POSIX_MEMALIGN)
120 posix_memalign(&address
, BLOCK_SIZE
, BLOCK_SIZE
);
121 memset(address
, 0, BLOCK_SIZE
);
123 static size_t pagesize
= getpagesize();
126 if (BLOCK_SIZE
> pagesize
)
127 extra
= BLOCK_SIZE
- pagesize
;
129 void* mmapResult
= mmap(NULL
, BLOCK_SIZE
+ extra
, PROT_READ
| PROT_WRITE
, MAP_PRIVATE
| MAP_ANON
, -1, 0);
130 uintptr_t address
= reinterpret_cast<uintptr_t>(mmapResult
);
133 if ((address
& BLOCK_OFFSET_MASK
) != 0)
134 adjust
= BLOCK_SIZE
- (address
& BLOCK_OFFSET_MASK
);
137 munmap(reinterpret_cast<void*>(address
), adjust
);
140 munmap(reinterpret_cast<void*>(address
+ adjust
+ BLOCK_SIZE
), extra
- adjust
);
143 memset(reinterpret_cast<void*>(address
), 0, BLOCK_SIZE
);
146 return reinterpret_cast<CollectorBlock
*>(address
);
149 static void freeBlock(CollectorBlock
* block
)
152 vm_deallocate(current_task(), reinterpret_cast<vm_address_t
>(block
), BLOCK_SIZE
);
153 #elif PLATFORM(WIN_OS)
154 VirtualFree(block
, BLOCK_SIZE
, MEM_RELEASE
);
155 #elif HAVE(POSIX_MEMALIGN)
158 munmap(block
, BLOCK_SIZE
);
162 void Collector::recordExtraCost(size_t cost
)
164 // Our frequency of garbage collection tries to balance memory use against speed
165 // by collecting based on the number of newly created values. However, for values
166 // that hold on to a great deal of memory that's not in the form of other JS values,
167 // that is not good enough - in some cases a lot of those objects can pile up and
168 // use crazy amounts of memory without a GC happening. So we track these extra
169 // memory costs. Only unusually large objects are noted, and we only keep track
170 // of this extra cost until the next GC. In garbage collected languages, most values
171 // are either very short lived temporaries, or have extremely long lifetimes. So
172 // if a large value survives one garbage collection, there is not much point to
173 // collecting more frequently as long as it stays alive.
174 // NOTE: we target the primaryHeap unconditionally as JSNumber doesn't modify cost
176 primaryHeap
.extraCost
+= cost
;
179 template <Collector::HeapType heapType
> struct HeapConstants
;
181 template <> struct HeapConstants
<Collector::PrimaryHeap
> {
182 static const size_t cellSize
= CELL_SIZE
;
183 static const size_t cellsPerBlock
= CELLS_PER_BLOCK
;
184 static const size_t bitmapShift
= 0;
185 typedef CollectorCell Cell
;
186 typedef CollectorBlock Block
;
189 template <> struct HeapConstants
<Collector::NumberHeap
> {
190 static const size_t cellSize
= SMALL_CELL_SIZE
;
191 static const size_t cellsPerBlock
= SMALL_CELLS_PER_BLOCK
;
192 static const size_t bitmapShift
= 1;
193 typedef SmallCollectorCell Cell
;
194 typedef SmallCellCollectorBlock Block
;
197 template <Collector::HeapType heapType
> void* Collector::heapAllocate(size_t s
)
199 typedef typename HeapConstants
<heapType
>::Block Block
;
200 typedef typename HeapConstants
<heapType
>::Cell Cell
;
202 CollectorHeap
& heap
= heapType
== PrimaryHeap
? primaryHeap
: numberHeap
;
203 ASSERT(JSLock::lockCount() > 0);
204 ASSERT(JSLock::currentThreadIsHoldingLock());
205 ASSERT(s
<= HeapConstants
<heapType
>::cellSize
);
206 UNUSED_PARAM(s
); // s is now only used for the above assert
208 ASSERT(heap
.operationInProgress
== NoOperation
);
209 ASSERT(heapType
== PrimaryHeap
|| heap
.extraCost
== 0);
210 // FIXME: If another global variable access here doesn't hurt performance
211 // too much, we could abort() in NDEBUG builds, which could help ensure we
212 // don't spend any time debugging cases where we allocate inside an object's
213 // deallocation code.
215 size_t numLiveObjects
= heap
.numLiveObjects
;
216 size_t usedBlocks
= heap
.usedBlocks
;
217 size_t i
= heap
.firstBlockWithPossibleSpace
;
219 // if we have a huge amount of extra cost, we'll try to collect even if we still have
221 if (heapType
== PrimaryHeap
&& heap
.extraCost
> ALLOCATIONS_PER_COLLECTION
) {
222 size_t numLiveObjectsAtLastCollect
= heap
.numLiveObjectsAtLastCollect
;
223 size_t numNewObjects
= numLiveObjects
- numLiveObjectsAtLastCollect
;
224 const size_t newCost
= numNewObjects
+ heap
.extraCost
;
225 if (newCost
>= ALLOCATIONS_PER_COLLECTION
&& newCost
>= numLiveObjectsAtLastCollect
)
229 ASSERT(heap
.operationInProgress
== NoOperation
);
231 // FIXME: Consider doing this in NDEBUG builds too (see comment above).
232 heap
.operationInProgress
= Allocation
;
237 size_t targetBlockUsedCells
;
238 if (i
!= usedBlocks
) {
239 targetBlock
= (Block
*)heap
.blocks
[i
];
240 targetBlockUsedCells
= targetBlock
->usedCells
;
241 ASSERT(targetBlockUsedCells
<= HeapConstants
<heapType
>::cellsPerBlock
);
242 while (targetBlockUsedCells
== HeapConstants
<heapType
>::cellsPerBlock
) {
243 if (++i
== usedBlocks
)
245 targetBlock
= (Block
*)heap
.blocks
[i
];
246 targetBlockUsedCells
= targetBlock
->usedCells
;
247 ASSERT(targetBlockUsedCells
<= HeapConstants
<heapType
>::cellsPerBlock
);
249 heap
.firstBlockWithPossibleSpace
= i
;
253 size_t numLiveObjectsAtLastCollect
= heap
.numLiveObjectsAtLastCollect
;
254 size_t numNewObjects
= numLiveObjects
- numLiveObjectsAtLastCollect
;
255 const size_t newCost
= numNewObjects
+ heap
.extraCost
;
257 if (newCost
>= ALLOCATIONS_PER_COLLECTION
&& newCost
>= numLiveObjectsAtLastCollect
) {
259 heap
.operationInProgress
= NoOperation
;
261 bool collected
= collect();
263 heap
.operationInProgress
= Allocation
;
266 numLiveObjects
= heap
.numLiveObjects
;
267 usedBlocks
= heap
.usedBlocks
;
268 i
= heap
.firstBlockWithPossibleSpace
;
273 // didn't find a block, and GC didn't reclaim anything, need to allocate a new block
274 size_t numBlocks
= heap
.numBlocks
;
275 if (usedBlocks
== numBlocks
) {
276 numBlocks
= max(MIN_ARRAY_SIZE
, numBlocks
* GROWTH_FACTOR
);
277 heap
.numBlocks
= numBlocks
;
278 heap
.blocks
= static_cast<CollectorBlock
**>(fastRealloc(heap
.blocks
, numBlocks
* sizeof(CollectorBlock
*)));
281 targetBlock
= (Block
*)allocateBlock();
282 targetBlock
->freeList
= targetBlock
->cells
;
283 targetBlockUsedCells
= 0;
284 heap
.blocks
[usedBlocks
] = (CollectorBlock
*)targetBlock
;
285 heap
.usedBlocks
= usedBlocks
+ 1;
286 heap
.firstBlockWithPossibleSpace
= usedBlocks
;
289 // find a free spot in the block and detach it from the free list
290 Cell
*newCell
= targetBlock
->freeList
;
292 // "next" field is a cell offset -- 0 means next cell, so a zeroed block is already initialized
293 targetBlock
->freeList
= (newCell
+ 1) + newCell
->u
.freeCell
.next
;
295 targetBlock
->usedCells
= static_cast<uint32_t>(targetBlockUsedCells
+ 1);
296 heap
.numLiveObjects
= numLiveObjects
+ 1;
299 // FIXME: Consider doing this in NDEBUG builds too (see comment above).
300 heap
.operationInProgress
= NoOperation
;
306 void* Collector::allocate(size_t s
)
308 return heapAllocate
<PrimaryHeap
>(s
);
311 void* Collector::allocateNumber(size_t s
)
313 return heapAllocate
<NumberHeap
>(s
);
316 static inline void* currentThreadStackBase()
319 pthread_t thread
= pthread_self();
320 return pthread_get_stackaddr_np(thread
);
321 #elif PLATFORM(WIN_OS) && PLATFORM(X86) && COMPILER(MSVC)
322 // offset 0x18 from the FS segment register gives a pointer to
323 // the thread information block for the current thread
329 return (void*)pTib
->StackBase
;
330 #elif PLATFORM(WIN_OS) && PLATFORM(X86_64) && COMPILER(MSVC)
331 PNT_TIB64 pTib
= reinterpret_cast<PNT_TIB64
>(NtCurrentTeb());
332 return (void*)pTib
->StackBase
;
333 #elif PLATFORM(WIN_OS) && PLATFORM(X86) && COMPILER(GCC)
334 // offset 0x18 from the FS segment register gives a pointer to
335 // the thread information block for the current thread
337 asm ( "movl %%fs:0x18, %0\n"
340 return (void*)pTib
->StackBase
;
341 #elif PLATFORM(SOLARIS)
346 static void* stackBase
= 0;
347 static size_t stackSize
= 0;
348 static pthread_t stackThread
;
349 pthread_t thread
= pthread_self();
350 if (stackBase
== 0 || thread
!= stackThread
) {
351 pthread_attr_t sattr
;
352 pthread_attr_init(&sattr
);
353 #if HAVE(PTHREAD_NP_H)
354 // e.g. on FreeBSD 5.4, neundorf@kde.org
355 pthread_attr_get_np(thread
, &sattr
);
357 // FIXME: this function is non-portable; other POSIX systems may have different np alternatives
358 pthread_getattr_np(thread
, &sattr
);
360 int rc
= pthread_attr_getstack(&sattr
, &stackBase
, &stackSize
);
361 (void)rc
; // FIXME: Deal with error code somehow? Seems fatal.
363 pthread_attr_destroy(&sattr
);
364 stackThread
= thread
;
366 return static_cast<char*>(stackBase
) + stackSize
;
368 #error Need a way to get the stack base on this platform
372 #if USE(MULTIPLE_THREADS)
373 static pthread_t mainThread
;
376 void Collector::registerAsMainThread()
378 #if USE(MULTIPLE_THREADS)
379 mainThread
= pthread_self();
383 static inline bool onMainThread()
385 #if USE(MULTIPLE_THREADS)
387 pthread_t javaScriptCollectionThread
= JSJavaScriptCollectionThread();
388 return javaScriptCollectionThread
== 0 || javaScriptCollectionThread
== pthread_self();
390 return !!pthread_equal(pthread_self(), mainThread
);
397 #if USE(MULTIPLE_THREADS)
400 typedef mach_port_t PlatformThread
;
401 #elif PLATFORM(WIN_OS)
402 struct PlatformThread
{
403 PlatformThread(DWORD _id
, HANDLE _handle
) : id(_id
), handle(_handle
) {}
409 static inline PlatformThread
getCurrentPlatformThread()
412 return pthread_mach_thread_np(pthread_self());
413 #elif PLATFORM(WIN_OS)
414 HANDLE threadHandle
= pthread_getw32threadhandle_np(pthread_self());
415 return PlatformThread(GetCurrentThreadId(), threadHandle
);
419 class Collector::Thread
{
421 Thread(pthread_t pthread
, const PlatformThread
& platThread
, void* base
)
422 : posixThread(pthread
), platformThread(platThread
), stackBase(base
) {}
424 pthread_t posixThread
;
425 PlatformThread platformThread
;
429 pthread_key_t registeredThreadKey
;
430 pthread_once_t registeredThreadKeyOnce
= PTHREAD_ONCE_INIT
;
431 Collector::Thread
* registeredThreads
;
433 static void destroyRegisteredThread(void* data
)
435 Collector::Thread
* thread
= (Collector::Thread
*)data
;
437 // Can't use JSLock convenience object here because we don't want to re-register
438 // an exiting thread.
441 if (registeredThreads
== thread
) {
442 registeredThreads
= registeredThreads
->next
;
444 Collector::Thread
*last
= registeredThreads
;
445 Collector::Thread
*t
;
446 for (t
= registeredThreads
->next
; t
!= NULL
; t
= t
->next
) {
448 last
->next
= t
->next
;
453 ASSERT(t
); // If t is NULL, we never found ourselves in the list.
461 static void initializeRegisteredThreadKey()
463 pthread_key_create(®isteredThreadKey
, destroyRegisteredThread
);
466 void Collector::registerThread()
468 ASSERT(JSLock::lockCount() > 0);
469 ASSERT(JSLock::currentThreadIsHoldingLock());
471 pthread_once(®isteredThreadKeyOnce
, initializeRegisteredThreadKey
);
473 if (!pthread_getspecific(registeredThreadKey
)) {
476 CollectorHeapIntrospector::init(&primaryHeap
, &numberHeap
);
479 Collector::Thread
*thread
= new Collector::Thread(pthread_self(), getCurrentPlatformThread(), currentThreadStackBase());
481 thread
->next
= registeredThreads
;
482 registeredThreads
= thread
;
483 pthread_setspecific(registeredThreadKey
, thread
);
// True when p is aligned to the pointer size, as all stack slots must be.
#define IS_POINTER_ALIGNED(p) (((intptr_t)(p) & (sizeof(char *) - 1)) == 0)

// cell size needs to be a power of two for this to be valid
#define IS_HALF_CELL_ALIGNED(p) (((intptr_t)(p) & (CELL_MASK >> 1)) == 0)
494 void Collector::markStackObjectsConservatively(void *start
, void *end
)
502 ASSERT(((char*)end
- (char*)start
) < 0x1000000);
503 ASSERT(IS_POINTER_ALIGNED(start
));
504 ASSERT(IS_POINTER_ALIGNED(end
));
506 char** p
= (char**)start
;
507 char** e
= (char**)end
;
509 size_t usedPrimaryBlocks
= primaryHeap
.usedBlocks
;
510 size_t usedNumberBlocks
= numberHeap
.usedBlocks
;
511 CollectorBlock
**primaryBlocks
= primaryHeap
.blocks
;
512 CollectorBlock
**numberBlocks
= numberHeap
.blocks
;
514 const size_t lastCellOffset
= sizeof(CollectorCell
) * (CELLS_PER_BLOCK
- 1);
518 if (IS_HALF_CELL_ALIGNED(x
) && x
) {
519 uintptr_t xAsBits
= reinterpret_cast<uintptr_t>(x
);
520 xAsBits
&= CELL_ALIGN_MASK
;
521 uintptr_t offset
= xAsBits
& BLOCK_OFFSET_MASK
;
522 CollectorBlock
* blockAddr
= reinterpret_cast<CollectorBlock
*>(xAsBits
- offset
);
523 // Mark the the number heap, we can mark these Cells directly to avoid the virtual call cost
524 for (size_t block
= 0; block
< usedNumberBlocks
; block
++) {
525 if ((numberBlocks
[block
] == blockAddr
) & (offset
<= lastCellOffset
)) {
526 Collector::markCell(reinterpret_cast<JSCell
*>(xAsBits
));
531 // Mark the primary heap
532 for (size_t block
= 0; block
< usedPrimaryBlocks
; block
++) {
533 if ((primaryBlocks
[block
] == blockAddr
) & (offset
<= lastCellOffset
)) {
534 if (((CollectorCell
*)xAsBits
)->u
.freeCell
.zeroIfFree
!= 0) {
535 JSCell
* imp
= reinterpret_cast<JSCell
*>(xAsBits
);
548 void Collector::markCurrentThreadConservatively()
550 // setjmp forces volatile registers onto the stack
553 #pragma warning(push)
554 #pragma warning(disable: 4611)
562 void* stackPointer
= &dummy
;
563 void* stackBase
= currentThreadStackBase();
565 markStackObjectsConservatively(stackPointer
, stackBase
);
568 #if USE(MULTIPLE_THREADS)
570 static inline void suspendThread(const PlatformThread
& platformThread
)
573 thread_suspend(platformThread
);
574 #elif PLATFORM(WIN_OS)
575 SuspendThread(platformThread
.handle
);
577 #error Need a way to suspend threads on this platform
581 static inline void resumeThread(const PlatformThread
& platformThread
)
584 thread_resume(platformThread
);
585 #elif PLATFORM(WIN_OS)
586 ResumeThread(platformThread
.handle
);
588 #error Need a way to resume threads on this platform
592 typedef unsigned long usword_t
; // word size, assumed to be either 32 or 64 bit
597 typedef i386_thread_state_t PlatformThreadRegisters
;
598 #elif PLATFORM(X86_64)
599 typedef x86_thread_state64_t PlatformThreadRegisters
;
601 typedef ppc_thread_state_t PlatformThreadRegisters
;
602 #elif PLATFORM(PPC64)
603 typedef ppc_thread_state64_t PlatformThreadRegisters
;
605 typedef arm_thread_state_t PlatformThreadRegisters
;
607 #error Unknown Architecture
610 #elif PLATFORM(WIN_OS)&& PLATFORM(X86)
611 typedef CONTEXT PlatformThreadRegisters
;
613 #error Need a thread register struct for this platform
616 size_t getPlatformThreadRegisters(const PlatformThread
& platformThread
, PlatformThreadRegisters
& regs
)
621 unsigned user_count
= sizeof(regs
)/sizeof(int);
622 thread_state_flavor_t flavor
= i386_THREAD_STATE
;
623 #elif PLATFORM(X86_64)
624 unsigned user_count
= x86_THREAD_STATE64_COUNT
;
625 thread_state_flavor_t flavor
= x86_THREAD_STATE64
;
627 unsigned user_count
= PPC_THREAD_STATE_COUNT
;
628 thread_state_flavor_t flavor
= PPC_THREAD_STATE
;
629 #elif PLATFORM(PPC64)
630 unsigned user_count
= PPC_THREAD_STATE64_COUNT
;
631 thread_state_flavor_t flavor
= PPC_THREAD_STATE64
;
633 unsigned user_count
= ARM_THREAD_STATE_COUNT
;
634 thread_state_flavor_t flavor
= ARM_THREAD_STATE
;
636 #error Unknown Architecture
639 kern_return_t result
= thread_get_state(platformThread
, flavor
, (thread_state_t
)®s
, &user_count
);
640 if (result
!= KERN_SUCCESS
) {
641 WTFReportFatalError(__FILE__
, __LINE__
, WTF_PRETTY_FUNCTION
,
642 "JavaScript garbage collection failed because thread_get_state returned an error (%d). This is probably the result of running inside Rosetta, which is not supported.", result
);
645 return user_count
* sizeof(usword_t
);
646 // end PLATFORM(DARWIN)
648 #elif PLATFORM(WIN_OS) && PLATFORM(X86)
649 regs
.ContextFlags
= CONTEXT_INTEGER
| CONTEXT_CONTROL
| CONTEXT_SEGMENTS
;
650 GetThreadContext(platformThread
.handle
, ®s
);
651 return sizeof(CONTEXT
);
653 #error Need a way to get thread registers on this platform
657 static inline void* otherThreadStackPointer(const PlatformThreadRegisters
& regs
)
664 return (void*)regs
.__esp
;
665 #elif PLATFORM(X86_64)
666 return (void*)regs
.__rsp
;
667 #elif PLATFORM(PPC) || PLATFORM(PPC64)
668 return (void*)regs
.__r1
;
670 return (void*)regs
.__sp
;
672 #error Unknown Architecture
675 #else // !__DARWIN_UNIX03
678 return (void*)regs
.esp
;
679 #elif PLATFORM(X86_64)
680 return (void*)regs
.rsp
;
681 #elif (PLATFORM(PPC) || PLATFORM(PPC64))
682 return (void*)regs
.r1
;
684 #error Unknown Architecture
687 #endif // __DARWIN_UNIX03
689 // end PLATFORM(DARWIN)
690 #elif PLATFORM(X86) && PLATFORM(WIN_OS)
691 return (void*)(uintptr_t)regs
.Esp
;
693 #error Need a way to get the stack pointer for another thread on this platform
697 void Collector::markOtherThreadConservatively(Thread
* thread
)
699 suspendThread(thread
->platformThread
);
701 PlatformThreadRegisters regs
;
702 size_t regSize
= getPlatformThreadRegisters(thread
->platformThread
, regs
);
704 // mark the thread's registers
705 markStackObjectsConservatively((void*)®s
, (void*)((char*)®s
+ regSize
));
707 void* stackPointer
= otherThreadStackPointer(regs
);
708 markStackObjectsConservatively(stackPointer
, thread
->stackBase
);
710 resumeThread(thread
->platformThread
);
715 void Collector::markStackObjectsConservatively()
717 markCurrentThreadConservatively();
719 #if USE(MULTIPLE_THREADS)
720 for (Thread
*thread
= registeredThreads
; thread
!= NULL
; thread
= thread
->next
) {
721 if (!pthread_equal(thread
->posixThread
, pthread_self())) {
722 markOtherThreadConservatively(thread
);
728 typedef HashCountedSet
<JSCell
*> ProtectCountSet
;
730 static ProtectCountSet
& protectedValues()
732 static ProtectCountSet staticProtectCountSet
;
733 return staticProtectCountSet
;
736 void Collector::protect(JSValue
*k
)
739 ASSERT(JSLock::lockCount() > 0);
740 ASSERT(JSLock::currentThreadIsHoldingLock());
742 if (JSImmediate::isImmediate(k
))
745 protectedValues().add(k
->asCell());
748 void Collector::unprotect(JSValue
*k
)
751 ASSERT(JSLock::lockCount() > 0);
752 ASSERT(JSLock::currentThreadIsHoldingLock());
754 if (JSImmediate::isImmediate(k
))
757 protectedValues().remove(k
->asCell());
760 void Collector::collectOnMainThreadOnly(JSValue
* value
)
763 ASSERT(JSLock::lockCount() > 0);
764 ASSERT(JSLock::currentThreadIsHoldingLock());
766 if (JSImmediate::isImmediate(value
))
769 JSCell
* cell
= value
->asCell();
770 cellBlock(cell
)->collectOnMainThreadOnly
.set(cellOffset(cell
));
771 ++mainThreadOnlyObjectCount
;
774 void Collector::markProtectedObjects()
776 ProtectCountSet
& protectedValues
= KJS::protectedValues();
777 ProtectCountSet::iterator end
= protectedValues
.end();
778 for (ProtectCountSet::iterator it
= protectedValues
.begin(); it
!= end
; ++it
) {
779 JSCell
*val
= it
->first
;
785 void Collector::markMainThreadOnlyObjects()
787 #if USE(MULTIPLE_THREADS)
788 ASSERT(!onMainThread());
791 // Optimization for clients that never register "main thread only" objects.
792 if (!mainThreadOnlyObjectCount
)
795 // FIXME: We can optimize this marking algorithm by keeping an exact set of
796 // "main thread only" objects when the "main thread only" object count is
797 // small. We don't want to keep an exact set all the time, because WebCore
798 // tends to create lots of "main thread only" objects, and all that set
799 // thrashing can be expensive.
803 // We don't look at the numberHeap as primitive values can never be marked as main thread only
804 for (size_t block
= 0; block
< primaryHeap
.usedBlocks
; block
++) {
805 ASSERT(count
< mainThreadOnlyObjectCount
);
807 CollectorBlock
* curBlock
= primaryHeap
.blocks
[block
];
808 size_t minimumCellsToProcess
= curBlock
->usedCells
;
809 for (size_t i
= 0; (i
< minimumCellsToProcess
) & (i
< CELLS_PER_BLOCK
); i
++) {
810 CollectorCell
* cell
= curBlock
->cells
+ i
;
811 if (cell
->u
.freeCell
.zeroIfFree
== 0)
812 ++minimumCellsToProcess
;
814 if (curBlock
->collectOnMainThreadOnly
.get(i
)) {
815 if (!curBlock
->marked
.get(i
)) {
816 JSCell
* imp
= reinterpret_cast<JSCell
*>(cell
);
819 if (++count
== mainThreadOnlyObjectCount
)
827 template <Collector::HeapType heapType
> size_t Collector::sweep(bool currentThreadIsMainThread
)
829 typedef typename HeapConstants
<heapType
>::Block Block
;
830 typedef typename HeapConstants
<heapType
>::Cell Cell
;
832 UNUSED_PARAM(currentThreadIsMainThread
); // currentThreadIsMainThread is only used in ASSERTs
833 // SWEEP: delete everything with a zero refcount (garbage) and unmark everything else
834 CollectorHeap
& heap
= heapType
== Collector::PrimaryHeap
? primaryHeap
: numberHeap
;
836 size_t emptyBlocks
= 0;
837 size_t numLiveObjects
= heap
.numLiveObjects
;
839 for (size_t block
= 0; block
< heap
.usedBlocks
; block
++) {
840 Block
* curBlock
= (Block
*)heap
.blocks
[block
];
842 size_t usedCells
= curBlock
->usedCells
;
843 Cell
* freeList
= curBlock
->freeList
;
845 if (usedCells
== HeapConstants
<heapType
>::cellsPerBlock
) {
846 // special case with a block where all cells are used -- testing indicates this happens often
847 for (size_t i
= 0; i
< HeapConstants
<heapType
>::cellsPerBlock
; i
++) {
848 if (!curBlock
->marked
.get(i
>> HeapConstants
<heapType
>::bitmapShift
)) {
849 Cell
* cell
= curBlock
->cells
+ i
;
851 if (heapType
!= Collector::NumberHeap
) {
852 JSCell
* imp
= reinterpret_cast<JSCell
*>(cell
);
853 // special case for allocated but uninitialized object
854 // (We don't need this check earlier because nothing prior this point
855 // assumes the object has a valid vptr.)
856 if (cell
->u
.freeCell
.zeroIfFree
== 0)
859 ASSERT(currentThreadIsMainThread
|| !curBlock
->collectOnMainThreadOnly
.get(i
));
860 if (curBlock
->collectOnMainThreadOnly
.get(i
)) {
861 curBlock
->collectOnMainThreadOnly
.clear(i
);
862 --Collector::mainThreadOnlyObjectCount
;
870 // put cell on the free list
871 cell
->u
.freeCell
.zeroIfFree
= 0;
872 cell
->u
.freeCell
.next
= freeList
- (cell
+ 1);
877 size_t minimumCellsToProcess
= usedCells
;
878 for (size_t i
= 0; (i
< minimumCellsToProcess
) & (i
< HeapConstants
<heapType
>::cellsPerBlock
); i
++) {
879 Cell
*cell
= curBlock
->cells
+ i
;
880 if (cell
->u
.freeCell
.zeroIfFree
== 0) {
881 ++minimumCellsToProcess
;
883 if (!curBlock
->marked
.get(i
>> HeapConstants
<heapType
>::bitmapShift
)) {
884 if (heapType
!= Collector::NumberHeap
) {
885 JSCell
*imp
= reinterpret_cast<JSCell
*>(cell
);
886 ASSERT(currentThreadIsMainThread
|| !curBlock
->collectOnMainThreadOnly
.get(i
));
887 if (curBlock
->collectOnMainThreadOnly
.get(i
)) {
888 curBlock
->collectOnMainThreadOnly
.clear(i
);
889 --Collector::mainThreadOnlyObjectCount
;
896 // put cell on the free list
897 cell
->u
.freeCell
.zeroIfFree
= 0;
898 cell
->u
.freeCell
.next
= freeList
- (cell
+ 1);
905 curBlock
->usedCells
= static_cast<uint32_t>(usedCells
);
906 curBlock
->freeList
= freeList
;
907 curBlock
->marked
.clearAll();
909 if (usedCells
== 0) {
911 if (emptyBlocks
> SPARE_EMPTY_BLOCKS
) {
913 freeBlock((CollectorBlock
*)curBlock
);
915 // swap with the last block so we compact as we go
916 heap
.blocks
[block
] = heap
.blocks
[heap
.usedBlocks
- 1];
918 block
--; // Don't move forward a step in this case
920 if (heap
.numBlocks
> MIN_ARRAY_SIZE
&& heap
.usedBlocks
< heap
.numBlocks
/ LOW_WATER_FACTOR
) {
921 heap
.numBlocks
= heap
.numBlocks
/ GROWTH_FACTOR
;
922 heap
.blocks
= (CollectorBlock
**)fastRealloc(heap
.blocks
, heap
.numBlocks
* sizeof(CollectorBlock
*));
928 if (heap
.numLiveObjects
!= numLiveObjects
)
929 heap
.firstBlockWithPossibleSpace
= 0;
931 heap
.numLiveObjects
= numLiveObjects
;
932 heap
.numLiveObjectsAtLastCollect
= numLiveObjects
;
934 return numLiveObjects
;
937 bool Collector::collect()
939 ASSERT(JSLock::lockCount() > 0);
940 ASSERT(JSLock::currentThreadIsHoldingLock());
942 ASSERT((primaryHeap
.operationInProgress
== NoOperation
) | (numberHeap
.operationInProgress
== NoOperation
));
943 if ((primaryHeap
.operationInProgress
!= NoOperation
) | (numberHeap
.operationInProgress
!= NoOperation
))
946 primaryHeap
.operationInProgress
= Collection
;
947 numberHeap
.operationInProgress
= Collection
;
949 bool currentThreadIsMainThread
= onMainThread();
951 // MARK: first mark all referenced objects recursively starting out from the set of root objects
954 // Forbid malloc during the mark phase. Marking a thread suspends it, so
955 // a malloc inside mark() would risk a deadlock with a thread that had been
956 // suspended while holding the malloc lock.
960 markStackObjectsConservatively();
961 markProtectedObjects();
962 ExecState::markActiveExecStates();
963 List::markProtectedLists();
964 #if USE(MULTIPLE_THREADS)
965 if (!currentThreadIsMainThread
)
966 markMainThreadOnlyObjects();
973 size_t originalLiveObjects
= primaryHeap
.numLiveObjects
+ numberHeap
.numLiveObjects
;
974 size_t numLiveObjects
= sweep
<PrimaryHeap
>(currentThreadIsMainThread
);
975 numLiveObjects
+= sweep
<NumberHeap
>(currentThreadIsMainThread
);
977 primaryHeap
.operationInProgress
= NoOperation
;
978 numberHeap
.operationInProgress
= NoOperation
;
980 return numLiveObjects
< originalLiveObjects
;
983 size_t Collector::size()
985 return primaryHeap
.numLiveObjects
+ numberHeap
.numLiveObjects
;
988 size_t Collector::globalObjectCount()
991 if (JSGlobalObject::head()) {
992 JSGlobalObject
* o
= JSGlobalObject::head();
996 } while (o
!= JSGlobalObject::head());
1001 size_t Collector::protectedGlobalObjectCount()
1004 if (JSGlobalObject::head()) {
1005 JSGlobalObject
* o
= JSGlobalObject::head();
1007 if (protectedValues().contains(o
))
1010 } while (o
!= JSGlobalObject::head());
1015 size_t Collector::protectedObjectCount()
1017 return protectedValues().size();
1020 static const char *typeName(JSCell
*val
)
1022 const char *name
= "???";
1023 switch (val
->type()) {
1024 case UnspecifiedType
:
1042 const ClassInfo
*info
= static_cast<JSObject
*>(val
)->classInfo();
1043 name
= info
? info
->className
: "Object";
1046 case GetterSetterType
:
1047 name
= "gettersetter";
1053 HashCountedSet
<const char*>* Collector::protectedObjectTypeCounts()
1055 HashCountedSet
<const char*>* counts
= new HashCountedSet
<const char*>;
1057 ProtectCountSet
& protectedValues
= KJS::protectedValues();
1058 ProtectCountSet::iterator end
= protectedValues
.end();
1059 for (ProtectCountSet::iterator it
= protectedValues
.begin(); it
!= end
; ++it
)
1060 counts
->add(typeName(it
->first
));
1065 bool Collector::isBusy()
1067 return (primaryHeap
.operationInProgress
!= NoOperation
) | (numberHeap
.operationInProgress
!= NoOperation
);
1070 void Collector::reportOutOfMemoryToAllExecStates()
1072 ExecStateStack::const_iterator end
= ExecState::activeExecStates().end();
1073 for (ExecStateStack::const_iterator it
= ExecState::activeExecStates().begin(); it
!= end
; ++it
) {
1074 (*it
)->setException(Error::create(*it
, GeneralError
, "Out of memory"));