1 /*
2 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008 Apple Inc. All rights reserved.
3 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
18 *
19 */
20
21 #include "config.h"
22 #include "Collector.h"
23
24 #include "ArgList.h"
25 #include "CallFrame.h"
26 #include "CollectorHeapIterator.h"
27 #include "Interpreter.h"
28 #include "JSGlobalObject.h"
29 #include "JSLock.h"
30 #include "JSONObject.h"
31 #include "JSString.h"
32 #include "JSValue.h"
33 #include "Nodes.h"
34 #include "Tracing.h"
35 #include <algorithm>
36 #include <limits.h>
37 #include <setjmp.h>
38 #include <stdlib.h>
39 #include <wtf/FastMalloc.h>
40 #include <wtf/HashCountedSet.h>
41 #include <wtf/UnusedParam.h>
42 #include <wtf/VMTags.h>
43
44 #if PLATFORM(DARWIN)
45
46 #include <mach/mach_init.h>
47 #include <mach/mach_port.h>
48 #include <mach/task.h>
49 #include <mach/thread_act.h>
50 #include <mach/vm_map.h>
51
52 #elif PLATFORM(WIN_OS)
53
54 #include <windows.h>
55
56 #elif PLATFORM(UNIX)
57
58 #include <stdlib.h>
59 #include <sys/mman.h>
60 #include <unistd.h>
61
62 #if PLATFORM(SOLARIS)
63 #include <thread.h>
64 #else
65 #include <pthread.h>
66 #endif
67
68 #if HAVE(PTHREAD_NP_H)
69 #include <pthread_np.h>
70 #endif
71
72 #endif
73
74 #define DEBUG_COLLECTOR 0
75 #define COLLECT_ON_EVERY_ALLOCATION 0
76
77 using std::max;
78
79 namespace JSC {
80
81 // tunable parameters
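// SPARE_EMPTY_BLOCKS: number of empty blocks kept around after a sweep; additional empty blocks are freed.
// GROWTH_FACTOR: multiplier used when growing the block pointer array (and divisor when shrinking it).
// LOW_WATER_FACTOR: the block array is shrunk once fewer than 1/LOW_WATER_FACTOR of its slots are in use.
// ALLOCATIONS_PER_COLLECTION: minimum number of new cells (plus recorded extra cost) between collections.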
82
83 const size_t SPARE_EMPTY_BLOCKS = 2;
84 const size_t GROWTH_FACTOR = 2;
85 const size_t LOW_WATER_FACTOR = 4;
86 const size_t ALLOCATIONS_PER_COLLECTION = 4000;
87 // This value has to be a macro to be used in max() without introducing
88 // a PIC branch in Mach-O binaries, see <rdar://problem/5971391>.
89 #define MIN_ARRAY_SIZE (static_cast<size_t>(14))
90
91 static void freeHeap(CollectorHeap*);
92
93 #if ENABLE(JSC_MULTIPLE_THREADS)
94
95 #if PLATFORM(DARWIN)
96 typedef mach_port_t PlatformThread;
97 #elif PLATFORM(WIN_OS)
98 struct PlatformThread {
99 PlatformThread(DWORD _id, HANDLE _handle) : id(_id), handle(_handle) {}
100 DWORD id;
101 HANDLE handle;
102 };
103 #endif
104
105 class Heap::Thread {
106 public:
107 Thread(pthread_t pthread, const PlatformThread& platThread, void* base)
108 : posixThread(pthread)
109 , platformThread(platThread)
110 , stackBase(base)
111 {
112 }
113
114 Thread* next;
115 pthread_t posixThread;
116 PlatformThread platformThread;
117 void* stackBase;
118 };
119
120 #endif
121
122 Heap::Heap(JSGlobalData* globalData)
123 : m_markListSet(0)
124 #if ENABLE(JSC_MULTIPLE_THREADS)
125 , m_registeredThreads(0)
126 , m_currentThreadRegistrar(0)
127 #endif
128 , m_globalData(globalData)
129 {
130 ASSERT(globalData);
131
132 memset(&primaryHeap, 0, sizeof(CollectorHeap));
133 memset(&numberHeap, 0, sizeof(CollectorHeap));
134 }
135
136 Heap::~Heap()
137 {
138 // The destroy function must already have been called, so assert this.
139 ASSERT(!m_globalData);
140 }
141
142 void Heap::destroy()
143 {
144 JSLock lock(false);
145
146 if (!m_globalData)
147 return;
148
149 // The global object is not GC protected at this point, so sweeping may delete it
150 // (and thus the global data) before other objects that may use the global data.
151 RefPtr<JSGlobalData> protect(m_globalData);
152
153 delete m_markListSet;
154 m_markListSet = 0;
155
156 sweep<PrimaryHeap>();
157 // No need to sweep number heap, because the JSNumber destructor doesn't do anything.
158
159 ASSERT(!primaryHeap.numLiveObjects);
160
161 freeHeap(&primaryHeap);
162 freeHeap(&numberHeap);
163
164 #if ENABLE(JSC_MULTIPLE_THREADS)
165 if (m_currentThreadRegistrar) {
166 int error = pthread_key_delete(m_currentThreadRegistrar);
167 ASSERT_UNUSED(error, !error);
168 }
169
170 MutexLocker registeredThreadsLock(m_registeredThreadsMutex);
171 for (Heap::Thread* t = m_registeredThreads; t;) {
172 Heap::Thread* next = t->next;
173 delete t;
174 t = next;
175 }
176 #endif
177
178 m_globalData = 0;
179 }
180
181 template <HeapType heapType>
182 static NEVER_INLINE CollectorBlock* allocateBlock()
183 {
184 #if PLATFORM(DARWIN)
185 vm_address_t address = 0;
186 // FIXME: tag the region as a JavaScriptCore heap when we get a registered VM tag: <rdar://problem/6054788>.
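    // Passing BLOCK_OFFSET_MASK as the vm_map() alignment mask guarantees that the returned
    // region starts on a BLOCK_SIZE boundary, which the pointer masking in markConservatively
    // and cellBlock() depends on.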
187 vm_map(current_task(), &address, BLOCK_SIZE, BLOCK_OFFSET_MASK, VM_FLAGS_ANYWHERE | VM_TAG_FOR_COLLECTOR_MEMORY, MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
188 #elif PLATFORM(SYMBIAN)
189     // Symbian has no memory-mapping facility to use here, so fall back to fastMalloc
190 void* address = fastMalloc(BLOCK_SIZE);
191 memset(reinterpret_cast<void*>(address), 0, BLOCK_SIZE);
192 #elif PLATFORM(WIN_OS)
193     // Windows virtual address granularity is naturally 64KB
194 LPVOID address = VirtualAlloc(NULL, BLOCK_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
195 #elif HAVE(POSIX_MEMALIGN)
196 void* address;
197 posix_memalign(&address, BLOCK_SIZE, BLOCK_SIZE);
198 memset(address, 0, BLOCK_SIZE);
199 #else
200
201 #if ENABLE(JSC_MULTIPLE_THREADS)
202 #error Need to initialize pagesize safely.
203 #endif
204 static size_t pagesize = getpagesize();
205
206 size_t extra = 0;
207 if (BLOCK_SIZE > pagesize)
208 extra = BLOCK_SIZE - pagesize;
209
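    // mmap() only guarantees page alignment, so over-allocate by (BLOCK_SIZE - pagesize) bytes;
    // a BLOCK_SIZE-aligned region is then guaranteed to fit inside the mapping, and the unaligned
    // head and unused tail are unmapped below.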
210 void* mmapResult = mmap(NULL, BLOCK_SIZE + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
211 uintptr_t address = reinterpret_cast<uintptr_t>(mmapResult);
212
213 size_t adjust = 0;
214 if ((address & BLOCK_OFFSET_MASK) != 0)
215 adjust = BLOCK_SIZE - (address & BLOCK_OFFSET_MASK);
216
217 if (adjust > 0)
218 munmap(reinterpret_cast<char*>(address), adjust);
219
220 if (adjust < extra)
221 munmap(reinterpret_cast<char*>(address + adjust + BLOCK_SIZE), extra - adjust);
222
223 address += adjust;
224 memset(reinterpret_cast<void*>(address), 0, BLOCK_SIZE);
225 #endif
226 reinterpret_cast<CollectorBlock*>(address)->type = heapType;
227 return reinterpret_cast<CollectorBlock*>(address);
228 }
229
230 static void freeBlock(CollectorBlock* block)
231 {
232 #if PLATFORM(DARWIN)
233 vm_deallocate(current_task(), reinterpret_cast<vm_address_t>(block), BLOCK_SIZE);
234 #elif PLATFORM(SYMBIAN)
235 fastFree(block);
236 #elif PLATFORM(WIN_OS)
237 VirtualFree(block, 0, MEM_RELEASE);
238 #elif HAVE(POSIX_MEMALIGN)
239 free(block);
240 #else
241 munmap(reinterpret_cast<char*>(block), BLOCK_SIZE);
242 #endif
243 }
244
245 static void freeHeap(CollectorHeap* heap)
246 {
247 for (size_t i = 0; i < heap->usedBlocks; ++i)
248 if (heap->blocks[i])
249 freeBlock(heap->blocks[i]);
250 fastFree(heap->blocks);
251 memset(heap, 0, sizeof(CollectorHeap));
252 }
253
254 void Heap::recordExtraCost(size_t cost)
255 {
256 // Our frequency of garbage collection tries to balance memory use against speed
257 // by collecting based on the number of newly created values. However, for values
258 // that hold on to a great deal of memory that's not in the form of other JS values,
259 // that is not good enough - in some cases a lot of those objects can pile up and
260 // use crazy amounts of memory without a GC happening. So we track these extra
261 // memory costs. Only unusually large objects are noted, and we only keep track
262 // of this extra cost until the next GC. In garbage collected languages, most values
263 // are either very short lived temporaries, or have extremely long lifetimes. So
264 // if a large value survives one garbage collection, there is not much point to
265 // collecting more frequently as long as it stays alive.
266     // NOTE: we charge the primaryHeap unconditionally, since JSNumber cells never record extra cost.
267
268 primaryHeap.extraCost += cost;
269 }
270
271 template <HeapType heapType> ALWAYS_INLINE void* Heap::heapAllocate(size_t s)
272 {
273 typedef typename HeapConstants<heapType>::Block Block;
274 typedef typename HeapConstants<heapType>::Cell Cell;
275
276 CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;
277 ASSERT(JSLock::lockCount() > 0);
278 ASSERT(JSLock::currentThreadIsHoldingLock());
279 ASSERT_UNUSED(s, s <= HeapConstants<heapType>::cellSize);
280
281 ASSERT(heap.operationInProgress == NoOperation);
282 ASSERT(heapType == PrimaryHeap || heap.extraCost == 0);
283 // FIXME: If another global variable access here doesn't hurt performance
284 // too much, we could CRASH() in NDEBUG builds, which could help ensure we
285 // don't spend any time debugging cases where we allocate inside an object's
286 // deallocation code.
287
288 #if COLLECT_ON_EVERY_ALLOCATION
289 collect();
290 #endif
291
292 size_t numLiveObjects = heap.numLiveObjects;
293 size_t usedBlocks = heap.usedBlocks;
294 size_t i = heap.firstBlockWithPossibleSpace;
295
296 // if we have a huge amount of extra cost, we'll try to collect even if we still have
297 // free cells left.
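    // This mirrors the trigger at the collect: label below: a collection happens only once the
    // new cost (cells allocated since the last collection plus extra cost) reaches
    // ALLOCATIONS_PER_COLLECTION and is at least as large as the live set at the last collection,
    // so the heap roughly doubles between collections.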
298 if (heapType == PrimaryHeap && heap.extraCost > ALLOCATIONS_PER_COLLECTION) {
299 size_t numLiveObjectsAtLastCollect = heap.numLiveObjectsAtLastCollect;
300 size_t numNewObjects = numLiveObjects - numLiveObjectsAtLastCollect;
301 const size_t newCost = numNewObjects + heap.extraCost;
302 if (newCost >= ALLOCATIONS_PER_COLLECTION && newCost >= numLiveObjectsAtLastCollect)
303 goto collect;
304 }
305
306 ASSERT(heap.operationInProgress == NoOperation);
307 #ifndef NDEBUG
308 // FIXME: Consider doing this in NDEBUG builds too (see comment above).
309 heap.operationInProgress = Allocation;
310 #endif
311
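    // Allocation proceeds in two phases: scan forward from firstBlockWithPossibleSpace for a block
    // with a free cell; if every block is full, fall through to collect:, where we either run a
    // collection (and rescan if it reclaimed anything) or append a brand new block, growing the
    // block pointer array as needed.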
312 scan:
313 Block* targetBlock;
314 size_t targetBlockUsedCells;
315 if (i != usedBlocks) {
316 targetBlock = reinterpret_cast<Block*>(heap.blocks[i]);
317 targetBlockUsedCells = targetBlock->usedCells;
318 ASSERT(targetBlockUsedCells <= HeapConstants<heapType>::cellsPerBlock);
319 while (targetBlockUsedCells == HeapConstants<heapType>::cellsPerBlock) {
320 if (++i == usedBlocks)
321 goto collect;
322 targetBlock = reinterpret_cast<Block*>(heap.blocks[i]);
323 targetBlockUsedCells = targetBlock->usedCells;
324 ASSERT(targetBlockUsedCells <= HeapConstants<heapType>::cellsPerBlock);
325 }
326 heap.firstBlockWithPossibleSpace = i;
327 } else {
328
329 collect:
330 size_t numLiveObjectsAtLastCollect = heap.numLiveObjectsAtLastCollect;
331 size_t numNewObjects = numLiveObjects - numLiveObjectsAtLastCollect;
332 const size_t newCost = numNewObjects + heap.extraCost;
333
334 if (newCost >= ALLOCATIONS_PER_COLLECTION && newCost >= numLiveObjectsAtLastCollect) {
335 #ifndef NDEBUG
336 heap.operationInProgress = NoOperation;
337 #endif
338 bool collected = collect();
339 #ifndef NDEBUG
340 heap.operationInProgress = Allocation;
341 #endif
342 if (collected) {
343 numLiveObjects = heap.numLiveObjects;
344 usedBlocks = heap.usedBlocks;
345 i = heap.firstBlockWithPossibleSpace;
346 goto scan;
347 }
348 }
349
350     // No block had free space, and collection (if it ran) didn't reclaim anything, so allocate a new block.
351 size_t numBlocks = heap.numBlocks;
352 if (usedBlocks == numBlocks) {
353 static const size_t maxNumBlocks = ULONG_MAX / sizeof(CollectorBlock*) / GROWTH_FACTOR;
354 if (numBlocks > maxNumBlocks)
355 CRASH();
356 numBlocks = max(MIN_ARRAY_SIZE, numBlocks * GROWTH_FACTOR);
357 heap.numBlocks = numBlocks;
358 heap.blocks = static_cast<CollectorBlock**>(fastRealloc(heap.blocks, numBlocks * sizeof(CollectorBlock*)));
359 }
360
361 targetBlock = reinterpret_cast<Block*>(allocateBlock<heapType>());
362 targetBlock->freeList = targetBlock->cells;
363 targetBlock->heap = this;
364 targetBlockUsedCells = 0;
365 heap.blocks[usedBlocks] = reinterpret_cast<CollectorBlock*>(targetBlock);
366 heap.usedBlocks = usedBlocks + 1;
367 heap.firstBlockWithPossibleSpace = usedBlocks;
368 }
369
370 // find a free spot in the block and detach it from the free list
371 Cell* newCell = targetBlock->freeList;
372
373 // "next" field is a cell offset -- 0 means next cell, so a zeroed block is already initialized
374 targetBlock->freeList = (newCell + 1) + newCell->u.freeCell.next;
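    // Example: in a freshly zeroed block every "next" offset is 0, so the free list simply walks
    // the cells in order. sweep() relinks a freed cell by storing the (possibly negative) distance
    // from the cell after it to the previous free list head, which the addition above undoes.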
375
376 targetBlock->usedCells = static_cast<uint32_t>(targetBlockUsedCells + 1);
377 heap.numLiveObjects = numLiveObjects + 1;
378
379 #ifndef NDEBUG
380 // FIXME: Consider doing this in NDEBUG builds too (see comment above).
381 heap.operationInProgress = NoOperation;
382 #endif
383
384 return newCell;
385 }
386
387 void* Heap::allocate(size_t s)
388 {
389 return heapAllocate<PrimaryHeap>(s);
390 }
391
392 void* Heap::allocateNumber(size_t s)
393 {
394 return heapAllocate<NumberHeap>(s);
395 }
396
397 #if PLATFORM(WINCE)
398 void* g_stackBase = 0;
399
400 inline bool isPageWritable(void* page)
401 {
402 MEMORY_BASIC_INFORMATION memoryInformation;
403 DWORD result = VirtualQuery(page, &memoryInformation, sizeof(memoryInformation));
404
405 // return false on error, including ptr outside memory
406 if (result != sizeof(memoryInformation))
407 return false;
408
409 DWORD protect = memoryInformation.Protect & ~(PAGE_GUARD | PAGE_NOCACHE);
410 return protect == PAGE_READWRITE
411 || protect == PAGE_WRITECOPY
412 || protect == PAGE_EXECUTE_READWRITE
413 || protect == PAGE_EXECUTE_WRITECOPY;
414 }
415
416 static void* getStackBase(void* previousFrame)
417 {
418 // find the address of this stack frame by taking the address of a local variable
419 bool isGrowingDownward;
420 void* thisFrame = (void*)(&isGrowingDownward);
421
422 isGrowingDownward = previousFrame < &thisFrame;
423 static DWORD pageSize = 0;
424 if (!pageSize) {
425 SYSTEM_INFO systemInfo;
426 GetSystemInfo(&systemInfo);
427 pageSize = systemInfo.dwPageSize;
428 }
429
430 // scan all of memory starting from this frame, and return the last writeable page found
431 register char* currentPage = (char*)((DWORD)thisFrame & ~(pageSize - 1));
432 if (isGrowingDownward) {
433 while (currentPage > 0) {
434 // check for underflow
435 if (currentPage >= (char*)pageSize)
436 currentPage -= pageSize;
437 else
438 currentPage = 0;
439 if (!isPageWritable(currentPage))
440 return currentPage + pageSize;
441 }
442 return 0;
443 } else {
444 while (true) {
445 // guaranteed to complete because isPageWritable returns false at end of memory
446 currentPage += pageSize;
447 if (!isPageWritable(currentPage))
448 return currentPage;
449 }
450 }
451 }
452 #endif
453
454 static inline void* currentThreadStackBase()
455 {
456 #if PLATFORM(DARWIN)
457 pthread_t thread = pthread_self();
458 return pthread_get_stackaddr_np(thread);
459 #elif PLATFORM(WIN_OS) && PLATFORM(X86) && COMPILER(MSVC)
460 // offset 0x18 from the FS segment register gives a pointer to
461 // the thread information block for the current thread
462 NT_TIB* pTib;
463 __asm {
464 MOV EAX, FS:[18h]
465 MOV pTib, EAX
466 }
467 return static_cast<void*>(pTib->StackBase);
468 #elif PLATFORM(WIN_OS) && PLATFORM(X86_64) && COMPILER(MSVC)
469 PNT_TIB64 pTib = reinterpret_cast<PNT_TIB64>(NtCurrentTeb());
470 return reinterpret_cast<void*>(pTib->StackBase);
471 #elif PLATFORM(WIN_OS) && PLATFORM(X86) && COMPILER(GCC)
472 // offset 0x18 from the FS segment register gives a pointer to
473 // the thread information block for the current thread
474 NT_TIB* pTib;
475 asm ( "movl %%fs:0x18, %0\n"
476 : "=r" (pTib)
477 );
478 return static_cast<void*>(pTib->StackBase);
479 #elif PLATFORM(SOLARIS)
480 stack_t s;
481 thr_stksegment(&s);
482 return s.ss_sp;
483 #elif PLATFORM(OPENBSD)
484 pthread_t thread = pthread_self();
485 stack_t stack;
486 pthread_stackseg_np(thread, &stack);
487 return stack.ss_sp;
488 #elif PLATFORM(SYMBIAN)
489 static void* stackBase = 0;
490 if (stackBase == 0) {
491 TThreadStackInfo info;
492 RThread thread;
493 thread.StackInfo(info);
494 stackBase = (void*)info.iBase;
495 }
496 return (void*)stackBase;
497 #elif PLATFORM(UNIX)
498 static void* stackBase = 0;
499 static size_t stackSize = 0;
500 static pthread_t stackThread;
501 pthread_t thread = pthread_self();
502 if (stackBase == 0 || thread != stackThread) {
503 pthread_attr_t sattr;
504 pthread_attr_init(&sattr);
505 #if HAVE(PTHREAD_NP_H) || PLATFORM(NETBSD)
506 // e.g. on FreeBSD 5.4, neundorf@kde.org
507 pthread_attr_get_np(thread, &sattr);
508 #else
509 // FIXME: this function is non-portable; other POSIX systems may have different np alternatives
510 pthread_getattr_np(thread, &sattr);
511 #endif
512 int rc = pthread_attr_getstack(&sattr, &stackBase, &stackSize);
513 (void)rc; // FIXME: Deal with error code somehow? Seems fatal.
514 ASSERT(stackBase);
515 pthread_attr_destroy(&sattr);
516 stackThread = thread;
517 }
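    // pthread_attr_getstack() returns the lowest addressable byte of the stack; the base we want
    // is the highest address, i.e. start plus size, since these stacks grow downward.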
518 return static_cast<char*>(stackBase) + stackSize;
519 #elif PLATFORM(WINCE)
520 if (g_stackBase)
521 return g_stackBase;
522 else {
523 int dummy;
524 return getStackBase(&dummy);
525 }
526 #else
527 #error Need a way to get the stack base on this platform
528 #endif
529 }
530
531 #if ENABLE(JSC_MULTIPLE_THREADS)
532
533 static inline PlatformThread getCurrentPlatformThread()
534 {
535 #if PLATFORM(DARWIN)
536 return pthread_mach_thread_np(pthread_self());
537 #elif PLATFORM(WIN_OS)
538 HANDLE threadHandle = pthread_getw32threadhandle_np(pthread_self());
539 return PlatformThread(GetCurrentThreadId(), threadHandle);
540 #endif
541 }
542
543 void Heap::makeUsableFromMultipleThreads()
544 {
545 if (m_currentThreadRegistrar)
546 return;
547
548 int error = pthread_key_create(&m_currentThreadRegistrar, unregisterThread);
549 if (error)
550 CRASH();
551 }
552
553 void Heap::registerThread()
554 {
555 if (!m_currentThreadRegistrar || pthread_getspecific(m_currentThreadRegistrar))
556 return;
557
558 pthread_setspecific(m_currentThreadRegistrar, this);
559 Heap::Thread* thread = new Heap::Thread(pthread_self(), getCurrentPlatformThread(), currentThreadStackBase());
560
561 MutexLocker lock(m_registeredThreadsMutex);
562
563 thread->next = m_registeredThreads;
564 m_registeredThreads = thread;
565 }
566
567 void Heap::unregisterThread(void* p)
568 {
569 if (p)
570 static_cast<Heap*>(p)->unregisterThread();
571 }
572
573 void Heap::unregisterThread()
574 {
575 pthread_t currentPosixThread = pthread_self();
576
577 MutexLocker lock(m_registeredThreadsMutex);
578
579 if (pthread_equal(currentPosixThread, m_registeredThreads->posixThread)) {
580 Thread* t = m_registeredThreads;
581 m_registeredThreads = m_registeredThreads->next;
582 delete t;
583 } else {
584 Heap::Thread* last = m_registeredThreads;
585 Heap::Thread* t;
586 for (t = m_registeredThreads->next; t; t = t->next) {
587 if (pthread_equal(t->posixThread, currentPosixThread)) {
588 last->next = t->next;
589 break;
590 }
591 last = t;
592 }
593 ASSERT(t); // If t is NULL, we never found ourselves in the list.
594 delete t;
595 }
596 }
597
598 #else // ENABLE(JSC_MULTIPLE_THREADS)
599
600 void Heap::registerThread()
601 {
602 }
603
604 #endif
605
606 #define IS_POINTER_ALIGNED(p) (((intptr_t)(p) & (sizeof(char*) - 1)) == 0)
607
608 // cell size needs to be a power of two for this to be valid
609 #define IS_HALF_CELL_ALIGNED(p) (((intptr_t)(p) & (CELL_MASK >> 1)) == 0)
610
611 void Heap::markConservatively(void* start, void* end)
612 {
613 if (start > end) {
614 void* tmp = start;
615 start = end;
616 end = tmp;
617 }
618
619 ASSERT((static_cast<char*>(end) - static_cast<char*>(start)) < 0x1000000);
620 ASSERT(IS_POINTER_ALIGNED(start));
621 ASSERT(IS_POINTER_ALIGNED(end));
622
623 char** p = static_cast<char**>(start);
624 char** e = static_cast<char**>(end);
625
626 size_t usedPrimaryBlocks = primaryHeap.usedBlocks;
627 size_t usedNumberBlocks = numberHeap.usedBlocks;
628 CollectorBlock** primaryBlocks = primaryHeap.blocks;
629 CollectorBlock** numberBlocks = numberHeap.blocks;
630
631 const size_t lastCellOffset = sizeof(CollectorCell) * (CELLS_PER_BLOCK - 1);
632
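    // Treat every pointer-aligned word in [start, end) as a potential cell pointer: round it down
    // to cell alignment, split it into a candidate block address and an offset, and only mark it
    // if the block address matches one of our collector blocks and the offset falls within the
    // block's cell array.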
633 while (p != e) {
634 char* x = *p++;
635 if (IS_HALF_CELL_ALIGNED(x) && x) {
636 uintptr_t xAsBits = reinterpret_cast<uintptr_t>(x);
637 xAsBits &= CELL_ALIGN_MASK;
638 uintptr_t offset = xAsBits & BLOCK_OFFSET_MASK;
639 CollectorBlock* blockAddr = reinterpret_cast<CollectorBlock*>(xAsBits - offset);
640                 // Mark the number heap first; we can mark these cells directly to avoid the virtual call cost
641 for (size_t block = 0; block < usedNumberBlocks; block++) {
642 if ((numberBlocks[block] == blockAddr) & (offset <= lastCellOffset)) {
643 Heap::markCell(reinterpret_cast<JSCell*>(xAsBits));
644 goto endMarkLoop;
645 }
646 }
647
648 // Mark the primary heap
649 for (size_t block = 0; block < usedPrimaryBlocks; block++) {
650 if ((primaryBlocks[block] == blockAddr) & (offset <= lastCellOffset)) {
651 if (reinterpret_cast<CollectorCell*>(xAsBits)->u.freeCell.zeroIfFree != 0) {
652 JSCell* imp = reinterpret_cast<JSCell*>(xAsBits);
653 if (!imp->marked())
654 imp->mark();
655 }
656 break;
657 }
658 }
659 endMarkLoop:
660 ;
661 }
662 }
663 }
664
665 void NEVER_INLINE Heap::markCurrentThreadConservativelyInternal()
666 {
667 void* dummy;
668 void* stackPointer = &dummy;
669 void* stackBase = currentThreadStackBase();
670 markConservatively(stackPointer, stackBase);
671 }
672
673 void Heap::markCurrentThreadConservatively()
674 {
675 // setjmp forces volatile registers onto the stack
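    // Any heap pointers held only in callee-saved registers are thereby spilled into the jmp_buf
    // on this frame's stack, where the conservative scan below can see them.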
676 jmp_buf registers;
677 #if COMPILER(MSVC)
678 #pragma warning(push)
679 #pragma warning(disable: 4611)
680 #endif
681 setjmp(registers);
682 #if COMPILER(MSVC)
683 #pragma warning(pop)
684 #endif
685
686 markCurrentThreadConservativelyInternal();
687 }
688
689 #if ENABLE(JSC_MULTIPLE_THREADS)
690
691 static inline void suspendThread(const PlatformThread& platformThread)
692 {
693 #if PLATFORM(DARWIN)
694 thread_suspend(platformThread);
695 #elif PLATFORM(WIN_OS)
696 SuspendThread(platformThread.handle);
697 #else
698 #error Need a way to suspend threads on this platform
699 #endif
700 }
701
702 static inline void resumeThread(const PlatformThread& platformThread)
703 {
704 #if PLATFORM(DARWIN)
705 thread_resume(platformThread);
706 #elif PLATFORM(WIN_OS)
707 ResumeThread(platformThread.handle);
708 #else
709 #error Need a way to resume threads on this platform
710 #endif
711 }
712
713 typedef unsigned long usword_t; // word size, assumed to be either 32 or 64 bit
714
715 #if PLATFORM(DARWIN)
716
717 #if PLATFORM(X86)
718 typedef i386_thread_state_t PlatformThreadRegisters;
719 #elif PLATFORM(X86_64)
720 typedef x86_thread_state64_t PlatformThreadRegisters;
721 #elif PLATFORM(PPC)
722 typedef ppc_thread_state_t PlatformThreadRegisters;
723 #elif PLATFORM(PPC64)
724 typedef ppc_thread_state64_t PlatformThreadRegisters;
725 #elif PLATFORM(ARM)
726 typedef arm_thread_state_t PlatformThreadRegisters;
727 #else
728 #error Unknown Architecture
729 #endif
730
731 #elif PLATFORM(WIN_OS) && PLATFORM(X86)
732 typedef CONTEXT PlatformThreadRegisters;
733 #else
734 #error Need a thread register struct for this platform
735 #endif
736
737 static size_t getPlatformThreadRegisters(const PlatformThread& platformThread, PlatformThreadRegisters& regs)
738 {
739 #if PLATFORM(DARWIN)
740
741 #if PLATFORM(X86)
742 unsigned user_count = sizeof(regs)/sizeof(int);
743 thread_state_flavor_t flavor = i386_THREAD_STATE;
744 #elif PLATFORM(X86_64)
745 unsigned user_count = x86_THREAD_STATE64_COUNT;
746 thread_state_flavor_t flavor = x86_THREAD_STATE64;
747 #elif PLATFORM(PPC)
748 unsigned user_count = PPC_THREAD_STATE_COUNT;
749 thread_state_flavor_t flavor = PPC_THREAD_STATE;
750 #elif PLATFORM(PPC64)
751 unsigned user_count = PPC_THREAD_STATE64_COUNT;
752 thread_state_flavor_t flavor = PPC_THREAD_STATE64;
753 #elif PLATFORM(ARM)
754 unsigned user_count = ARM_THREAD_STATE_COUNT;
755 thread_state_flavor_t flavor = ARM_THREAD_STATE;
756 #else
757 #error Unknown Architecture
758 #endif
759
760 kern_return_t result = thread_get_state(platformThread, flavor, (thread_state_t)&regs, &user_count);
761 if (result != KERN_SUCCESS) {
762 WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION,
763 "JavaScript garbage collection failed because thread_get_state returned an error (%d). This is probably the result of running inside Rosetta, which is not supported.", result);
764 CRASH();
765 }
766 return user_count * sizeof(usword_t);
767 // end PLATFORM(DARWIN)
768
769 #elif PLATFORM(WIN_OS) && PLATFORM(X86)
770 regs.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL | CONTEXT_SEGMENTS;
771 GetThreadContext(platformThread.handle, &regs);
772 return sizeof(CONTEXT);
773 #else
774 #error Need a way to get thread registers on this platform
775 #endif
776 }
777
778 static inline void* otherThreadStackPointer(const PlatformThreadRegisters& regs)
779 {
780 #if PLATFORM(DARWIN)
781
782 #if __DARWIN_UNIX03
783
784 #if PLATFORM(X86)
785 return reinterpret_cast<void*>(regs.__esp);
786 #elif PLATFORM(X86_64)
787 return reinterpret_cast<void*>(regs.__rsp);
788 #elif PLATFORM(PPC) || PLATFORM(PPC64)
789 return reinterpret_cast<void*>(regs.__r1);
790 #elif PLATFORM(ARM)
791 return reinterpret_cast<void*>(regs.__sp);
792 #else
793 #error Unknown Architecture
794 #endif
795
796 #else // !__DARWIN_UNIX03
797
798 #if PLATFORM(X86)
799 return reinterpret_cast<void*>(regs.esp);
800 #elif PLATFORM(X86_64)
801 return reinterpret_cast<void*>(regs.rsp);
802 #elif (PLATFORM(PPC) || PLATFORM(PPC64))
803 return reinterpret_cast<void*>(regs.r1);
804 #else
805 #error Unknown Architecture
806 #endif
807
808 #endif // __DARWIN_UNIX03
809
810 // end PLATFORM(DARWIN)
811 #elif PLATFORM(X86) && PLATFORM(WIN_OS)
812 return reinterpret_cast<void*>((uintptr_t) regs.Esp);
813 #else
814 #error Need a way to get the stack pointer for another thread on this platform
815 #endif
816 }
817
818 void Heap::markOtherThreadConservatively(Thread* thread)
819 {
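    // The target thread must stay suspended while we scan it so its registers and stack cannot
    // change underneath us; this is also why fastMalloc is forbidden during marking, since the
    // suspended thread could be holding the allocator lock.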
820 suspendThread(thread->platformThread);
821
822 PlatformThreadRegisters regs;
823 size_t regSize = getPlatformThreadRegisters(thread->platformThread, regs);
824
825 // mark the thread's registers
826 markConservatively(static_cast<void*>(&regs), static_cast<void*>(reinterpret_cast<char*>(&regs) + regSize));
827
828 void* stackPointer = otherThreadStackPointer(regs);
829 markConservatively(stackPointer, thread->stackBase);
830
831 resumeThread(thread->platformThread);
832 }
833
834 #endif
835
836 void Heap::markStackObjectsConservatively()
837 {
838 markCurrentThreadConservatively();
839
840 #if ENABLE(JSC_MULTIPLE_THREADS)
841
842 if (m_currentThreadRegistrar) {
843
844 MutexLocker lock(m_registeredThreadsMutex);
845
846 #ifndef NDEBUG
847 // Forbid malloc during the mark phase. Marking a thread suspends it, so
848 // a malloc inside mark() would risk a deadlock with a thread that had been
849 // suspended while holding the malloc lock.
850 fastMallocForbid();
851 #endif
852 // It is safe to access the registeredThreads list, because we earlier asserted that locks are being held,
853 // and since this is a shared heap, they are real locks.
854 for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
855 if (!pthread_equal(thread->posixThread, pthread_self()))
856 markOtherThreadConservatively(thread);
857 }
858 #ifndef NDEBUG
859 fastMallocAllow();
860 #endif
861 }
862 #endif
863 }
864
865 void Heap::setGCProtectNeedsLocking()
866 {
867 // Most clients do not need to call this, with the notable exception of WebCore.
868 // Clients that use shared heap have JSLock protection, while others are supposed
869 // to do explicit locking. WebCore violates this contract in Database code,
870 // which calls gcUnprotect from a secondary thread.
871 if (!m_protectedValuesMutex)
872 m_protectedValuesMutex.set(new Mutex);
873 }
874
875 void Heap::protect(JSValue k)
876 {
877 ASSERT(k);
878 ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance);
879
880 if (!k.isCell())
881 return;
882
883 if (m_protectedValuesMutex)
884 m_protectedValuesMutex->lock();
885
886 m_protectedValues.add(k.asCell());
887
888 if (m_protectedValuesMutex)
889 m_protectedValuesMutex->unlock();
890 }
891
892 void Heap::unprotect(JSValue k)
893 {
894 ASSERT(k);
895 ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance);
896
897 if (!k.isCell())
898 return;
899
900 if (m_protectedValuesMutex)
901 m_protectedValuesMutex->lock();
902
903 m_protectedValues.remove(k.asCell());
904
905 if (m_protectedValuesMutex)
906 m_protectedValuesMutex->unlock();
907 }
908
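// cellBlock() recovers the CollectorBlock containing a cell (blocks are BLOCK_SIZE-aligned),
// and every block records the Heap that allocated it (set in heapAllocate), so a cell can be
// mapped back to its owning heap.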
909 Heap* Heap::heap(JSValue v)
910 {
911 if (!v.isCell())
912 return 0;
913 return Heap::cellBlock(v.asCell())->heap;
914 }
915
916 void Heap::markProtectedObjects()
917 {
918 if (m_protectedValuesMutex)
919 m_protectedValuesMutex->lock();
920
921 ProtectCountSet::iterator end = m_protectedValues.end();
922 for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it) {
923 JSCell* val = it->first;
924 if (!val->marked())
925 val->mark();
926 }
927
928 if (m_protectedValuesMutex)
929 m_protectedValuesMutex->unlock();
930 }
931
932 template <HeapType heapType> size_t Heap::sweep()
933 {
934 typedef typename HeapConstants<heapType>::Block Block;
935 typedef typename HeapConstants<heapType>::Cell Cell;
936
937     // SWEEP: destroy every unmarked live cell (garbage) and clear all mark bits
938 CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;
939
940 size_t emptyBlocks = 0;
941 size_t numLiveObjects = heap.numLiveObjects;
942
943 for (size_t block = 0; block < heap.usedBlocks; block++) {
944 Block* curBlock = reinterpret_cast<Block*>(heap.blocks[block]);
945
946 size_t usedCells = curBlock->usedCells;
947 Cell* freeList = curBlock->freeList;
948
949 if (usedCells == HeapConstants<heapType>::cellsPerBlock) {
950 // special case with a block where all cells are used -- testing indicates this happens often
951 for (size_t i = 0; i < HeapConstants<heapType>::cellsPerBlock; i++) {
952 if (!curBlock->marked.get(i >> HeapConstants<heapType>::bitmapShift)) {
953 Cell* cell = curBlock->cells + i;
954
955 if (heapType != NumberHeap) {
956 JSCell* imp = reinterpret_cast<JSCell*>(cell);
957 // special case for allocated but uninitialized object
958                     // (We don't need this check earlier because nothing prior to this point
959 // assumes the object has a valid vptr.)
960 if (cell->u.freeCell.zeroIfFree == 0)
961 continue;
962
963 imp->~JSCell();
964 }
965
966 --usedCells;
967 --numLiveObjects;
968
969 // put cell on the free list
970 cell->u.freeCell.zeroIfFree = 0;
971 cell->u.freeCell.next = freeList - (cell + 1);
972 freeList = cell;
973 }
974 }
975 } else {
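            // Partially used block: only usedCells live cells remain, so stop once that many
            // non-free cells have been seen; each free cell encountered extends the bound by one
            // so live cells further along are still reached.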
976 size_t minimumCellsToProcess = usedCells;
977 for (size_t i = 0; (i < minimumCellsToProcess) & (i < HeapConstants<heapType>::cellsPerBlock); i++) {
978 Cell* cell = curBlock->cells + i;
979 if (cell->u.freeCell.zeroIfFree == 0) {
980 ++minimumCellsToProcess;
981 } else {
982 if (!curBlock->marked.get(i >> HeapConstants<heapType>::bitmapShift)) {
983 if (heapType != NumberHeap) {
984 JSCell* imp = reinterpret_cast<JSCell*>(cell);
985 imp->~JSCell();
986 }
987 --usedCells;
988 --numLiveObjects;
989
990 // put cell on the free list
991 cell->u.freeCell.zeroIfFree = 0;
992 cell->u.freeCell.next = freeList - (cell + 1);
993 freeList = cell;
994 }
995 }
996 }
997 }
998
999 curBlock->usedCells = static_cast<uint32_t>(usedCells);
1000 curBlock->freeList = freeList;
1001 curBlock->marked.clearAll();
1002
1003 if (usedCells == 0) {
1004 emptyBlocks++;
1005 if (emptyBlocks > SPARE_EMPTY_BLOCKS) {
1006 #if !DEBUG_COLLECTOR
1007 freeBlock(reinterpret_cast<CollectorBlock*>(curBlock));
1008 #endif
1009 // swap with the last block so we compact as we go
1010 heap.blocks[block] = heap.blocks[heap.usedBlocks - 1];
1011 heap.usedBlocks--;
1012 block--; // Don't move forward a step in this case
1013
1014 if (heap.numBlocks > MIN_ARRAY_SIZE && heap.usedBlocks < heap.numBlocks / LOW_WATER_FACTOR) {
1015 heap.numBlocks = heap.numBlocks / GROWTH_FACTOR;
1016 heap.blocks = static_cast<CollectorBlock**>(fastRealloc(heap.blocks, heap.numBlocks * sizeof(CollectorBlock*)));
1017 }
1018 }
1019 }
1020 }
1021
1022 if (heap.numLiveObjects != numLiveObjects)
1023 heap.firstBlockWithPossibleSpace = 0;
1024
1025 heap.numLiveObjects = numLiveObjects;
1026 heap.numLiveObjectsAtLastCollect = numLiveObjects;
1027 heap.extraCost = 0;
1028 return numLiveObjects;
1029 }
1030
1031 bool Heap::collect()
1032 {
1033 #ifndef NDEBUG
1034 if (m_globalData->isSharedInstance) {
1035 ASSERT(JSLock::lockCount() > 0);
1036 ASSERT(JSLock::currentThreadIsHoldingLock());
1037 }
1038 #endif
1039
1040 ASSERT((primaryHeap.operationInProgress == NoOperation) | (numberHeap.operationInProgress == NoOperation));
1041 if ((primaryHeap.operationInProgress != NoOperation) | (numberHeap.operationInProgress != NoOperation))
1042 CRASH();
1043
1044 JAVASCRIPTCORE_GC_BEGIN();
1045 primaryHeap.operationInProgress = Collection;
1046 numberHeap.operationInProgress = Collection;
1047
1048 // MARK: first mark all referenced objects recursively starting out from the set of root objects
1049
1050 markStackObjectsConservatively();
1051 markProtectedObjects();
1052 if (m_markListSet && m_markListSet->size())
1053 MarkedArgumentBuffer::markLists(*m_markListSet);
1054 if (m_globalData->exception && !m_globalData->exception.marked())
1055 m_globalData->exception.mark();
1056 m_globalData->interpreter->registerFile().markCallFrames(this);
1057 m_globalData->smallStrings.mark();
1058 if (m_globalData->scopeNodeBeingReparsed)
1059 m_globalData->scopeNodeBeingReparsed->mark();
1060 if (m_globalData->firstStringifierToMark)
1061 JSONObject::markStringifiers(m_globalData->firstStringifierToMark);
1062
1063 JAVASCRIPTCORE_GC_MARKED();
1064
1065 size_t originalLiveObjects = primaryHeap.numLiveObjects + numberHeap.numLiveObjects;
1066 size_t numLiveObjects = sweep<PrimaryHeap>();
1067 numLiveObjects += sweep<NumberHeap>();
1068
1069 primaryHeap.operationInProgress = NoOperation;
1070 numberHeap.operationInProgress = NoOperation;
1071 JAVASCRIPTCORE_GC_END(originalLiveObjects, numLiveObjects);
1072
1073 return numLiveObjects < originalLiveObjects;
1074 }
1075
1076 size_t Heap::objectCount()
1077 {
1078 return primaryHeap.numLiveObjects + numberHeap.numLiveObjects - m_globalData->smallStrings.count();
1079 }
1080
1081 template <HeapType heapType>
1082 static void addToStatistics(Heap::Statistics& statistics, const CollectorHeap& heap)
1083 {
1084 typedef HeapConstants<heapType> HC;
1085 for (size_t i = 0; i < heap.usedBlocks; ++i) {
1086 if (heap.blocks[i]) {
1087 statistics.size += BLOCK_SIZE;
1088 statistics.free += (HC::cellsPerBlock - heap.blocks[i]->usedCells) * HC::cellSize;
1089 }
1090 }
1091 }
1092
1093 Heap::Statistics Heap::statistics() const
1094 {
1095 Statistics statistics = { 0, 0 };
1096 JSC::addToStatistics<PrimaryHeap>(statistics, primaryHeap);
1097 JSC::addToStatistics<NumberHeap>(statistics, numberHeap);
1098 return statistics;
1099 }
1100
1101 size_t Heap::globalObjectCount()
1102 {
1103 size_t count = 0;
1104 if (JSGlobalObject* head = m_globalData->head) {
1105 JSGlobalObject* o = head;
1106 do {
1107 ++count;
1108 o = o->next();
1109 } while (o != head);
1110 }
1111 return count;
1112 }
1113
1114 size_t Heap::protectedGlobalObjectCount()
1115 {
1116 if (m_protectedValuesMutex)
1117 m_protectedValuesMutex->lock();
1118
1119 size_t count = 0;
1120 if (JSGlobalObject* head = m_globalData->head) {
1121 JSGlobalObject* o = head;
1122 do {
1123 if (m_protectedValues.contains(o))
1124 ++count;
1125 o = o->next();
1126 } while (o != head);
1127 }
1128
1129 if (m_protectedValuesMutex)
1130 m_protectedValuesMutex->unlock();
1131
1132 return count;
1133 }
1134
1135 size_t Heap::protectedObjectCount()
1136 {
1137 if (m_protectedValuesMutex)
1138 m_protectedValuesMutex->lock();
1139
1140 size_t result = m_protectedValues.size();
1141
1142 if (m_protectedValuesMutex)
1143 m_protectedValuesMutex->unlock();
1144
1145 return result;
1146 }
1147
1148 static const char* typeName(JSCell* cell)
1149 {
1150 if (cell->isString())
1151 return "string";
1152 #if USE(JSVALUE32)
1153 if (cell->isNumber())
1154 return "number";
1155 #endif
1156 if (cell->isGetterSetter())
1157 return "gettersetter";
1158 ASSERT(cell->isObject());
1159 const ClassInfo* info = static_cast<JSObject*>(cell)->classInfo();
1160 return info ? info->className : "Object";
1161 }
1162
1163 HashCountedSet<const char*>* Heap::protectedObjectTypeCounts()
1164 {
1165 HashCountedSet<const char*>* counts = new HashCountedSet<const char*>;
1166
1167 if (m_protectedValuesMutex)
1168 m_protectedValuesMutex->lock();
1169
1170 ProtectCountSet::iterator end = m_protectedValues.end();
1171 for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
1172 counts->add(typeName(it->first));
1173
1174 if (m_protectedValuesMutex)
1175 m_protectedValuesMutex->unlock();
1176
1177 return counts;
1178 }
1179
1180 bool Heap::isBusy()
1181 {
1182 return (primaryHeap.operationInProgress != NoOperation) | (numberHeap.operationInProgress != NoOperation);
1183 }
1184
1185 Heap::iterator Heap::primaryHeapBegin()
1186 {
1187 return iterator(primaryHeap.blocks, primaryHeap.blocks + primaryHeap.usedBlocks);
1188 }
1189
1190 Heap::iterator Heap::primaryHeapEnd()
1191 {
1192 return iterator(primaryHeap.blocks + primaryHeap.usedBlocks, primaryHeap.blocks + primaryHeap.usedBlocks);
1193 }
1194
1195 } // namespace JSC