/*
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include "config.h"
#include "Collector.h"

#include "ArgList.h"
#include "CallFrame.h"
#include "CodeBlock.h"
#include "CollectorHeapIterator.h"
#include "Interpreter.h"
#include "JSArray.h"
#include "JSGlobalObject.h"
#include "JSLock.h"
#include "JSONObject.h"
#include "JSString.h"
#include "JSValue.h"
#include "JSZombie.h"
#include "MarkStack.h"
#include "Nodes.h"
#include "Tracing.h"
#include <algorithm>
#include <limits.h>
#include <setjmp.h>
#include <stdlib.h>
#include <wtf/FastMalloc.h>
#include <wtf/HashCountedSet.h>
#include <wtf/UnusedParam.h>
#include <wtf/VMTags.h>

#if OS(DARWIN)

#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <mach/vm_map.h>

#elif OS(WINDOWS)

#include <windows.h>
#include <malloc.h>

#elif OS(HAIKU)

#include <OS.h>

#elif OS(UNIX)

#include <stdlib.h>
#if !OS(HAIKU)
#include <sys/mman.h>
#endif
#include <unistd.h>

#if OS(SOLARIS)
#include <thread.h>
#else
#include <pthread.h>
#endif

#if HAVE(PTHREAD_NP_H)
#include <pthread_np.h>
#endif

#if OS(QNX)
#include <fcntl.h>
#include <sys/procfs.h>
#include <stdio.h>
#include <errno.h>
#endif

#endif

#define COLLECT_ON_EVERY_ALLOCATION 0

using std::max;

namespace JSC {

// tunable parameters

const size_t GROWTH_FACTOR = 2;
const size_t LOW_WATER_FACTOR = 4;
const size_t ALLOCATIONS_PER_COLLECTION = 3600;
// This value has to be a macro to be used in max() without introducing
// a PIC branch in Mach-O binaries, see <rdar://problem/5971391>.
#define MIN_ARRAY_SIZE (static_cast<size_t>(14))

#if ENABLE(JSC_MULTIPLE_THREADS)

#if OS(DARWIN)
typedef mach_port_t PlatformThread;
#elif OS(WINDOWS)
typedef HANDLE PlatformThread;
#endif

class Heap::Thread {
public:
    Thread(pthread_t pthread, const PlatformThread& platThread, void* base)
        : posixThread(pthread)
        , platformThread(platThread)
        , stackBase(base)
    {
    }

    Thread* next;
    pthread_t posixThread;
    PlatformThread platformThread;
    void* stackBase;
};

#endif

Heap::Heap(JSGlobalData* globalData)
    : m_markListSet(0)
#if ENABLE(JSC_MULTIPLE_THREADS)
    , m_registeredThreads(0)
    , m_currentThreadRegistrar(0)
#endif
#if OS(SYMBIAN)
    , m_blockallocator(JSCCOLLECTOR_VIRTUALMEM_RESERVATION, BLOCK_SIZE)
#endif
    , m_globalData(globalData)
{
    ASSERT(globalData);
    memset(&m_heap, 0, sizeof(CollectorHeap));
    allocateBlock();
}

Heap::~Heap()
{
    // The destroy function must already have been called, so assert this.
    ASSERT(!m_globalData);
}

void Heap::destroy()
{
    JSLock lock(SilenceAssertionsOnly);

    if (!m_globalData)
        return;

    ASSERT(!m_globalData->dynamicGlobalObject);
    ASSERT(!isBusy());

    // The global object is not GC protected at this point, so sweeping may delete it
    // (and thus the global data) before other objects that may use the global data.
    RefPtr<JSGlobalData> protect(m_globalData);

    delete m_markListSet;
    m_markListSet = 0;

    freeBlocks();

#if ENABLE(JSC_MULTIPLE_THREADS)
    if (m_currentThreadRegistrar) {
        int error = pthread_key_delete(m_currentThreadRegistrar);
        ASSERT_UNUSED(error, !error);
    }

    MutexLocker registeredThreadsLock(m_registeredThreadsMutex);
    for (Heap::Thread* t = m_registeredThreads; t;) {
        Heap::Thread* next = t->next;
        delete t;
        t = next;
    }
#endif
#if OS(SYMBIAN)
    m_blockallocator.destroy();
#endif
    m_globalData = 0;
}

NEVER_INLINE CollectorBlock* Heap::allocateBlock()
{
#if OS(DARWIN)
    vm_address_t address = 0;
    vm_map(current_task(), &address, BLOCK_SIZE, BLOCK_OFFSET_MASK, VM_FLAGS_ANYWHERE | VM_TAG_FOR_COLLECTOR_MEMORY, MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
#elif OS(SYMBIAN)
    void* address = m_blockallocator.alloc();
    if (!address)
        CRASH();
#elif OS(WINCE)
    void* address = VirtualAlloc(NULL, BLOCK_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#elif OS(WINDOWS)
#if COMPILER(MINGW) && !COMPILER(MINGW64)
    void* address = __mingw_aligned_malloc(BLOCK_SIZE, BLOCK_SIZE);
#else
    void* address = _aligned_malloc(BLOCK_SIZE, BLOCK_SIZE);
#endif
    memset(address, 0, BLOCK_SIZE);
#elif HAVE(POSIX_MEMALIGN)
    void* address;
    posix_memalign(&address, BLOCK_SIZE, BLOCK_SIZE);
#else

#if ENABLE(JSC_MULTIPLE_THREADS)
#error Need to initialize pagesize safely.
#endif
    static size_t pagesize = getpagesize();

    size_t extra = 0;
    if (BLOCK_SIZE > pagesize)
        extra = BLOCK_SIZE - pagesize;

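    // No aligned allocator is available on this path, so over-allocate with mmap
    // and trim below: the unaligned head and the unused tail are munmap'd so the
    // surviving region starts on a BLOCK_SIZE boundary.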
    void* mmapResult = mmap(NULL, BLOCK_SIZE + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
    uintptr_t address = reinterpret_cast<uintptr_t>(mmapResult);

    size_t adjust = 0;
    if ((address & BLOCK_OFFSET_MASK) != 0)
        adjust = BLOCK_SIZE - (address & BLOCK_OFFSET_MASK);

    if (adjust > 0)
        munmap(reinterpret_cast<char*>(address), adjust);

    if (adjust < extra)
        munmap(reinterpret_cast<char*>(address + adjust + BLOCK_SIZE), extra - adjust);

    address += adjust;
#endif

    // Initialize block.

    CollectorBlock* block = reinterpret_cast<CollectorBlock*>(address);
    block->heap = this;
    clearMarkBits(block);

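    // Seed every cell with a dummy markable JSCell so that conservative marking
    // and sweeping can safely treat any cell in the block as a constructed object.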
    Structure* dummyMarkableCellStructure = m_globalData->dummyMarkableCellStructure.get();
    for (size_t i = 0; i < HeapConstants::cellsPerBlock; ++i)
        new (block->cells + i) JSCell(dummyMarkableCellStructure);

    // Add block to blocks vector.

    size_t numBlocks = m_heap.numBlocks;
    if (m_heap.usedBlocks == numBlocks) {
        static const size_t maxNumBlocks = ULONG_MAX / sizeof(CollectorBlock*) / GROWTH_FACTOR;
        if (numBlocks > maxNumBlocks)
            CRASH();
        numBlocks = max(MIN_ARRAY_SIZE, numBlocks * GROWTH_FACTOR);
        m_heap.numBlocks = numBlocks;
        m_heap.blocks = static_cast<CollectorBlock**>(fastRealloc(m_heap.blocks, numBlocks * sizeof(CollectorBlock*)));
    }
    m_heap.blocks[m_heap.usedBlocks++] = block;

    return block;
}

NEVER_INLINE void Heap::freeBlock(size_t block)
{
    m_heap.didShrink = true;

    ObjectIterator it(m_heap, block);
    ObjectIterator end(m_heap, block + 1);
    for ( ; it != end; ++it)
        (*it)->~JSCell();
    freeBlockPtr(m_heap.blocks[block]);

    // swap with the last block so we compact as we go
    m_heap.blocks[block] = m_heap.blocks[m_heap.usedBlocks - 1];
    m_heap.usedBlocks--;

    if (m_heap.numBlocks > MIN_ARRAY_SIZE && m_heap.usedBlocks < m_heap.numBlocks / LOW_WATER_FACTOR) {
        m_heap.numBlocks = m_heap.numBlocks / GROWTH_FACTOR;
        m_heap.blocks = static_cast<CollectorBlock**>(fastRealloc(m_heap.blocks, m_heap.numBlocks * sizeof(CollectorBlock*)));
    }
}

NEVER_INLINE void Heap::freeBlockPtr(CollectorBlock* block)
{
#if OS(DARWIN)
    vm_deallocate(current_task(), reinterpret_cast<vm_address_t>(block), BLOCK_SIZE);
#elif OS(SYMBIAN)
    m_blockallocator.free(reinterpret_cast<void*>(block));
#elif OS(WINCE)
    VirtualFree(block, 0, MEM_RELEASE);
#elif OS(WINDOWS)
#if COMPILER(MINGW) && !COMPILER(MINGW64)
    __mingw_aligned_free(block);
#else
    _aligned_free(block);
#endif
#elif HAVE(POSIX_MEMALIGN)
    free(block);
#else
    munmap(reinterpret_cast<char*>(block), BLOCK_SIZE);
#endif
}

void Heap::freeBlocks()
{
    ProtectCountSet protectedValuesCopy = m_protectedValues;

    clearMarkBits();
    ProtectCountSet::iterator protectedValuesEnd = protectedValuesCopy.end();
    for (ProtectCountSet::iterator it = protectedValuesCopy.begin(); it != protectedValuesEnd; ++it)
        markCell(it->first);

    m_heap.nextCell = 0;
    m_heap.nextBlock = 0;
    DeadObjectIterator it(m_heap, m_heap.nextBlock, m_heap.nextCell);
    DeadObjectIterator end(m_heap, m_heap.usedBlocks);
    for ( ; it != end; ++it)
        (*it)->~JSCell();

    ASSERT(!protectedObjectCount());

    protectedValuesEnd = protectedValuesCopy.end();
    for (ProtectCountSet::iterator it = protectedValuesCopy.begin(); it != protectedValuesEnd; ++it)
        it->first->~JSCell();

    for (size_t block = 0; block < m_heap.usedBlocks; ++block)
        freeBlockPtr(m_heap.blocks[block]);

    fastFree(m_heap.blocks);

    memset(&m_heap, 0, sizeof(CollectorHeap));
}

void Heap::recordExtraCost(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.

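    // Trigger a collection once the accumulated extra cost exceeds both the fixed
    // maxExtraCost threshold and half the memory currently held in collector blocks.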
    if (m_heap.extraCost > maxExtraCost && m_heap.extraCost > m_heap.usedBlocks * BLOCK_SIZE / 2) {
        // If the last iteration through the heap deallocated blocks, we need
        // to clean up remaining garbage before marking. Otherwise, the conservative
        // marking mechanism might follow a pointer to unmapped memory.
        if (m_heap.didShrink)
            sweep();
        reset();
    }
    m_heap.extraCost += cost;
}

void* Heap::allocate(size_t s)
{
    typedef HeapConstants::Block Block;
    typedef HeapConstants::Cell Cell;

    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());
    ASSERT_UNUSED(s, s <= HeapConstants::cellSize);

    ASSERT(m_heap.operationInProgress == NoOperation);

#if COLLECT_ON_EVERY_ALLOCATION
    collectAllGarbage();
    ASSERT(m_heap.operationInProgress == NoOperation);
#endif

allocate:

    // Fast case: find the next garbage cell and recycle it.

    do {
        ASSERT(m_heap.nextBlock < m_heap.usedBlocks);
        Block* block = reinterpret_cast<Block*>(m_heap.blocks[m_heap.nextBlock]);
        do {
            ASSERT(m_heap.nextCell < HeapConstants::cellsPerBlock);
            if (!block->marked.get(m_heap.nextCell)) { // Always false for the last cell in the block
                Cell* cell = block->cells + m_heap.nextCell;

                m_heap.operationInProgress = Allocation;
                JSCell* imp = reinterpret_cast<JSCell*>(cell);
                imp->~JSCell();
                m_heap.operationInProgress = NoOperation;

                ++m_heap.nextCell;
                return cell;
            }
        } while (++m_heap.nextCell != HeapConstants::cellsPerBlock);
        m_heap.nextCell = 0;
    } while (++m_heap.nextBlock != m_heap.usedBlocks);

    // Slow case: reached the end of the heap. Mark live objects and start over.

    reset();
    goto allocate;
}

void Heap::resizeBlocks()
{
    m_heap.didShrink = false;

    size_t usedCellCount = markedCells();
    size_t minCellCount = usedCellCount + max(ALLOCATIONS_PER_COLLECTION, usedCellCount);
    size_t minBlockCount = (minCellCount + HeapConstants::cellsPerBlock - 1) / HeapConstants::cellsPerBlock;

    size_t maxCellCount = 1.25f * minCellCount;
    size_t maxBlockCount = (maxCellCount + HeapConstants::cellsPerBlock - 1) / HeapConstants::cellsPerBlock;

    if (m_heap.usedBlocks < minBlockCount)
        growBlocks(minBlockCount);
    else if (m_heap.usedBlocks > maxBlockCount)
        shrinkBlocks(maxBlockCount);
}

void Heap::growBlocks(size_t neededBlocks)
{
    ASSERT(m_heap.usedBlocks < neededBlocks);
    while (m_heap.usedBlocks < neededBlocks)
        allocateBlock();
}

void Heap::shrinkBlocks(size_t neededBlocks)
{
    ASSERT(m_heap.usedBlocks > neededBlocks);

    // Clear the always-on last bit, so isEmpty() isn't fooled by it.
    for (size_t i = 0; i < m_heap.usedBlocks; ++i)
        m_heap.blocks[i]->marked.clear(HeapConstants::cellsPerBlock - 1);

    for (size_t i = 0; i != m_heap.usedBlocks && m_heap.usedBlocks != neededBlocks; ) {
        if (m_heap.blocks[i]->marked.isEmpty()) {
            freeBlock(i);
        } else
            ++i;
    }

    // Reset the always-on last bit.
    for (size_t i = 0; i < m_heap.usedBlocks; ++i)
        m_heap.blocks[i]->marked.set(HeapConstants::cellsPerBlock - 1);
}

#if OS(WINCE)
JS_EXPORTDATA void* g_stackBase = 0;

inline bool isPageWritable(void* page)
{
    MEMORY_BASIC_INFORMATION memoryInformation;
    DWORD result = VirtualQuery(page, &memoryInformation, sizeof(memoryInformation));

    // return false on error, including ptr outside memory
    if (result != sizeof(memoryInformation))
        return false;

    DWORD protect = memoryInformation.Protect & ~(PAGE_GUARD | PAGE_NOCACHE);
    return protect == PAGE_READWRITE
        || protect == PAGE_WRITECOPY
        || protect == PAGE_EXECUTE_READWRITE
        || protect == PAGE_EXECUTE_WRITECOPY;
}

static void* getStackBase(void* previousFrame)
{
    // find the address of this stack frame by taking the address of a local variable
    bool isGrowingDownward;
    void* thisFrame = (void*)(&isGrowingDownward);

    isGrowingDownward = previousFrame < &thisFrame;
    static DWORD pageSize = 0;
    if (!pageSize) {
        SYSTEM_INFO systemInfo;
        GetSystemInfo(&systemInfo);
        pageSize = systemInfo.dwPageSize;
    }

    // scan all of memory starting from this frame, and return the last writeable page found
    register char* currentPage = (char*)((DWORD)thisFrame & ~(pageSize - 1));
    if (isGrowingDownward) {
        while (currentPage > 0) {
            // check for underflow
            if (currentPage >= (char*)pageSize)
                currentPage -= pageSize;
            else
                currentPage = 0;
            if (!isPageWritable(currentPage))
                return currentPage + pageSize;
        }
        return 0;
    } else {
        while (true) {
            // guaranteed to complete because isPageWritable returns false at end of memory
            currentPage += pageSize;
            if (!isPageWritable(currentPage))
                return currentPage;
        }
    }
}
#endif

#if OS(QNX)
static inline void *currentThreadStackBaseQNX()
{
    static void* stackBase = 0;
    static size_t stackSize = 0;
    static pthread_t stackThread;
    pthread_t thread = pthread_self();
    if (stackBase == 0 || thread != stackThread) {
        struct _debug_thread_info threadInfo;
        memset(&threadInfo, 0, sizeof(threadInfo));
        threadInfo.tid = pthread_self();
        int fd = open("/proc/self", O_RDONLY);
        if (fd == -1) {
            LOG_ERROR("Unable to open /proc/self (errno: %d)", errno);
            return 0;
        }
        devctl(fd, DCMD_PROC_TIDSTATUS, &threadInfo, sizeof(threadInfo), 0);
        close(fd);
        stackBase = reinterpret_cast<void*>(threadInfo.stkbase);
        stackSize = threadInfo.stksize;
        ASSERT(stackBase);
        stackThread = thread;
    }
    return static_cast<char*>(stackBase) + stackSize;
}
#endif

static inline void* currentThreadStackBase()
{
#if OS(DARWIN)
    pthread_t thread = pthread_self();
    return pthread_get_stackaddr_np(thread);
#elif OS(WINDOWS) && CPU(X86) && COMPILER(MSVC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    NT_TIB* pTib;
    __asm {
        MOV EAX, FS:[18h]
        MOV pTib, EAX
    }
    return static_cast<void*>(pTib->StackBase);
#elif OS(WINDOWS) && CPU(X86) && COMPILER(GCC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    NT_TIB* pTib;
    asm ( "movl %%fs:0x18, %0\n"
          : "=r" (pTib)
        );
    return static_cast<void*>(pTib->StackBase);
#elif OS(WINDOWS) && CPU(X86_64)
    PNT_TIB64 pTib = reinterpret_cast<PNT_TIB64>(NtCurrentTeb());
    return reinterpret_cast<void*>(pTib->StackBase);
#elif OS(QNX)
    AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
    MutexLocker locker(mutex);
    return currentThreadStackBaseQNX();
#elif OS(SOLARIS)
    stack_t s;
    thr_stksegment(&s);
    return s.ss_sp;
#elif OS(OPENBSD)
    pthread_t thread = pthread_self();
    stack_t stack;
    pthread_stackseg_np(thread, &stack);
    return stack.ss_sp;
#elif OS(SYMBIAN)
    TThreadStackInfo info;
    RThread thread;
    thread.StackInfo(info);
    return (void*)info.iBase;
#elif OS(HAIKU)
    thread_info threadInfo;
    get_thread_info(find_thread(NULL), &threadInfo);
    return threadInfo.stack_end;
#elif OS(UNIX)
    AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
    MutexLocker locker(mutex);
    static void* stackBase = 0;
    static size_t stackSize = 0;
    static pthread_t stackThread;
    pthread_t thread = pthread_self();
    if (stackBase == 0 || thread != stackThread) {
        pthread_attr_t sattr;
        pthread_attr_init(&sattr);
#if HAVE(PTHREAD_NP_H) || OS(NETBSD)
        // e.g. on FreeBSD 5.4, neundorf@kde.org
        pthread_attr_get_np(thread, &sattr);
#else
        // FIXME: this function is non-portable; other POSIX systems may have different np alternatives
        pthread_getattr_np(thread, &sattr);
#endif
        int rc = pthread_attr_getstack(&sattr, &stackBase, &stackSize);
        (void)rc; // FIXME: Deal with error code somehow? Seems fatal.
        ASSERT(stackBase);
        pthread_attr_destroy(&sattr);
        stackThread = thread;
    }
    return static_cast<char*>(stackBase) + stackSize;
#elif OS(WINCE)
    AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
    MutexLocker locker(mutex);
    if (g_stackBase)
        return g_stackBase;
    else {
        int dummy;
        return getStackBase(&dummy);
    }
#else
#error Need a way to get the stack base on this platform
#endif
}

#if ENABLE(JSC_MULTIPLE_THREADS)

static inline PlatformThread getCurrentPlatformThread()
{
#if OS(DARWIN)
    return pthread_mach_thread_np(pthread_self());
#elif OS(WINDOWS)
    return pthread_getw32threadhandle_np(pthread_self());
#endif
}

void Heap::makeUsableFromMultipleThreads()
{
    if (m_currentThreadRegistrar)
        return;

    int error = pthread_key_create(&m_currentThreadRegistrar, unregisterThread);
    if (error)
        CRASH();
}

void Heap::registerThread()
{
    ASSERT(!m_globalData->exclusiveThread || m_globalData->exclusiveThread == currentThread());

    if (!m_currentThreadRegistrar || pthread_getspecific(m_currentThreadRegistrar))
        return;

    pthread_setspecific(m_currentThreadRegistrar, this);
    Heap::Thread* thread = new Heap::Thread(pthread_self(), getCurrentPlatformThread(), currentThreadStackBase());

    MutexLocker lock(m_registeredThreadsMutex);

    thread->next = m_registeredThreads;
    m_registeredThreads = thread;
}

void Heap::unregisterThread(void* p)
{
    if (p)
        static_cast<Heap*>(p)->unregisterThread();
}

void Heap::unregisterThread()
{
    pthread_t currentPosixThread = pthread_self();

    MutexLocker lock(m_registeredThreadsMutex);

    if (pthread_equal(currentPosixThread, m_registeredThreads->posixThread)) {
        Thread* t = m_registeredThreads;
        m_registeredThreads = m_registeredThreads->next;
        delete t;
    } else {
        Heap::Thread* last = m_registeredThreads;
        Heap::Thread* t;
        for (t = m_registeredThreads->next; t; t = t->next) {
            if (pthread_equal(t->posixThread, currentPosixThread)) {
                last->next = t->next;
                break;
            }
            last = t;
        }
        ASSERT(t); // If t is NULL, we never found ourselves in the list.
        delete t;
    }
}

#else // ENABLE(JSC_MULTIPLE_THREADS)

void Heap::registerThread()
{
}

#endif

inline bool isPointerAligned(void* p)
{
    return (((intptr_t)(p) & (sizeof(char*) - 1)) == 0);
}

// Cell size needs to be a power of two for isPossibleCell to be valid.
COMPILE_ASSERT(!(sizeof(CollectorCell) & (sizeof(CollectorCell) - 1)), Collector_cell_size_is_power_of_two);

#if USE(JSVALUE32)
static bool isHalfCellAligned(void *p)
{
    return (((intptr_t)(p) & (CELL_MASK >> 1)) == 0);
}

static inline bool isPossibleCell(void* p)
{
    return isHalfCellAligned(p) && p;
}

#else

static inline bool isCellAligned(void *p)
{
    return (((intptr_t)(p) & CELL_MASK) == 0);
}

static inline bool isPossibleCell(void* p)
{
    return isCellAligned(p) && p;
}
#endif // USE(JSVALUE32)

void Heap::markConservatively(MarkStack& markStack, void* start, void* end)
{
    if (start > end) {
        void* tmp = start;
        start = end;
        end = tmp;
    }

    ASSERT((static_cast<char*>(end) - static_cast<char*>(start)) < 0x1000000);
    ASSERT(isPointerAligned(start));
    ASSERT(isPointerAligned(end));

    char** p = static_cast<char**>(start);
    char** e = static_cast<char**>(end);

    CollectorBlock** blocks = m_heap.blocks;
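    // Treat each pointer-sized word in [start, end) as a candidate cell pointer:
    // round it down to cell alignment, reject offsets that fall past the last cell
    // in a block, and mark it only if the derived block address is one of ours.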
    while (p != e) {
        char* x = *p++;
        if (isPossibleCell(x)) {
            size_t usedBlocks;
            uintptr_t xAsBits = reinterpret_cast<uintptr_t>(x);
            xAsBits &= CELL_ALIGN_MASK;

            uintptr_t offset = xAsBits & BLOCK_OFFSET_MASK;
            const size_t lastCellOffset = sizeof(CollectorCell) * (CELLS_PER_BLOCK - 1);
            if (offset > lastCellOffset)
                continue;

            CollectorBlock* blockAddr = reinterpret_cast<CollectorBlock*>(xAsBits - offset);
            usedBlocks = m_heap.usedBlocks;
            for (size_t block = 0; block < usedBlocks; block++) {
                if (blocks[block] != blockAddr)
                    continue;
                markStack.append(reinterpret_cast<JSCell*>(xAsBits));
                markStack.drain();
            }
        }
    }
}

void NEVER_INLINE Heap::markCurrentThreadConservativelyInternal(MarkStack& markStack)
{
    void* dummy;
    void* stackPointer = &dummy;
    void* stackBase = currentThreadStackBase();
    markConservatively(markStack, stackPointer, stackBase);
}

#if COMPILER(GCC)
#define REGISTER_BUFFER_ALIGNMENT __attribute__ ((aligned (sizeof(void*))))
#else
#define REGISTER_BUFFER_ALIGNMENT
#endif

void Heap::markCurrentThreadConservatively(MarkStack& markStack)
{
    // setjmp forces volatile registers onto the stack
    jmp_buf registers REGISTER_BUFFER_ALIGNMENT;
#if COMPILER(MSVC)
#pragma warning(push)
#pragma warning(disable: 4611)
#endif
    setjmp(registers);
#if COMPILER(MSVC)
#pragma warning(pop)
#endif

    markCurrentThreadConservativelyInternal(markStack);
}

#if ENABLE(JSC_MULTIPLE_THREADS)

static inline void suspendThread(const PlatformThread& platformThread)
{
#if OS(DARWIN)
    thread_suspend(platformThread);
#elif OS(WINDOWS)
    SuspendThread(platformThread);
#else
#error Need a way to suspend threads on this platform
#endif
}

static inline void resumeThread(const PlatformThread& platformThread)
{
#if OS(DARWIN)
    thread_resume(platformThread);
#elif OS(WINDOWS)
    ResumeThread(platformThread);
#else
#error Need a way to resume threads on this platform
#endif
}

typedef unsigned long usword_t; // word size, assumed to be either 32 or 64 bit

#if OS(DARWIN)

#if CPU(X86)
typedef i386_thread_state_t PlatformThreadRegisters;
#elif CPU(X86_64)
typedef x86_thread_state64_t PlatformThreadRegisters;
#elif CPU(PPC)
typedef ppc_thread_state_t PlatformThreadRegisters;
#elif CPU(PPC64)
typedef ppc_thread_state64_t PlatformThreadRegisters;
#elif CPU(ARM)
typedef arm_thread_state_t PlatformThreadRegisters;
#else
#error Unknown Architecture
#endif

#elif OS(WINDOWS) && CPU(X86)
typedef CONTEXT PlatformThreadRegisters;
#else
#error Need a thread register struct for this platform
#endif

static size_t getPlatformThreadRegisters(const PlatformThread& platformThread, PlatformThreadRegisters& regs)
{
#if OS(DARWIN)

#if CPU(X86)
    unsigned user_count = sizeof(regs)/sizeof(int);
    thread_state_flavor_t flavor = i386_THREAD_STATE;
#elif CPU(X86_64)
    unsigned user_count = x86_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = x86_THREAD_STATE64;
#elif CPU(PPC)
    unsigned user_count = PPC_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE;
#elif CPU(PPC64)
    unsigned user_count = PPC_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE64;
#elif CPU(ARM)
    unsigned user_count = ARM_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = ARM_THREAD_STATE;
#else
#error Unknown Architecture
#endif

    kern_return_t result = thread_get_state(platformThread, flavor, (thread_state_t)&regs, &user_count);
    if (result != KERN_SUCCESS) {
        WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION,
            "JavaScript garbage collection failed because thread_get_state returned an error (%d). This is probably the result of running inside Rosetta, which is not supported.", result);
        CRASH();
    }
    return user_count * sizeof(usword_t);
// end OS(DARWIN)

#elif OS(WINDOWS) && CPU(X86)
    regs.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL | CONTEXT_SEGMENTS;
    GetThreadContext(platformThread, &regs);
    return sizeof(CONTEXT);
#else
#error Need a way to get thread registers on this platform
#endif
}

static inline void* otherThreadStackPointer(const PlatformThreadRegisters& regs)
{
#if OS(DARWIN)

#if __DARWIN_UNIX03

#if CPU(X86)
    return reinterpret_cast<void*>(regs.__esp);
#elif CPU(X86_64)
    return reinterpret_cast<void*>(regs.__rsp);
#elif CPU(PPC) || CPU(PPC64)
    return reinterpret_cast<void*>(regs.__r1);
#elif CPU(ARM)
    return reinterpret_cast<void*>(regs.__sp);
#else
#error Unknown Architecture
#endif

#else // !__DARWIN_UNIX03

#if CPU(X86)
    return reinterpret_cast<void*>(regs.esp);
#elif CPU(X86_64)
    return reinterpret_cast<void*>(regs.rsp);
#elif CPU(PPC) || CPU(PPC64)
    return reinterpret_cast<void*>(regs.r1);
#else
#error Unknown Architecture
#endif

#endif // __DARWIN_UNIX03

// end OS(DARWIN)
#elif CPU(X86) && OS(WINDOWS)
    return reinterpret_cast<void*>((uintptr_t) regs.Esp);
#else
#error Need a way to get the stack pointer for another thread on this platform
#endif
}

void Heap::markOtherThreadConservatively(MarkStack& markStack, Thread* thread)
{
    suspendThread(thread->platformThread);

    PlatformThreadRegisters regs;
    size_t regSize = getPlatformThreadRegisters(thread->platformThread, regs);

    // mark the thread's registers
    markConservatively(markStack, static_cast<void*>(&regs), static_cast<void*>(reinterpret_cast<char*>(&regs) + regSize));

    void* stackPointer = otherThreadStackPointer(regs);
    markConservatively(markStack, stackPointer, thread->stackBase);

    resumeThread(thread->platformThread);
}

#endif

void Heap::markStackObjectsConservatively(MarkStack& markStack)
{
    markCurrentThreadConservatively(markStack);

#if ENABLE(JSC_MULTIPLE_THREADS)

    if (m_currentThreadRegistrar) {

        MutexLocker lock(m_registeredThreadsMutex);

#ifndef NDEBUG
        // Forbid malloc during the mark phase. Marking a thread suspends it, so
        // a malloc inside markChildren() would risk a deadlock with a thread that had been
        // suspended while holding the malloc lock.
        fastMallocForbid();
#endif
        // It is safe to access the registeredThreads list, because we earlier asserted that locks are being held,
        // and since this is a shared heap, they are real locks.
        for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
            if (!pthread_equal(thread->posixThread, pthread_self()))
                markOtherThreadConservatively(markStack, thread);
        }
#ifndef NDEBUG
        fastMallocAllow();
#endif
    }
#endif
}

void Heap::protect(JSValue k)
{
    ASSERT(k);
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance());

    if (!k.isCell())
        return;

    m_protectedValues.add(k.asCell());
}

bool Heap::unprotect(JSValue k)
{
    ASSERT(k);
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance());

    if (!k.isCell())
        return false;

    return m_protectedValues.remove(k.asCell());
}

void Heap::markProtectedObjects(MarkStack& markStack)
{
    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it) {
        markStack.append(it->first);
        markStack.drain();
    }
}

void Heap::pushTempSortVector(Vector<ValueStringPair>* tempVector)
{
    m_tempSortingVectors.append(tempVector);
}

void Heap::popTempSortVector(Vector<ValueStringPair>* tempVector)
{
    ASSERT_UNUSED(tempVector, tempVector == m_tempSortingVectors.last());
    m_tempSortingVectors.removeLast();
}

void Heap::markTempSortVectors(MarkStack& markStack)
{
    typedef Vector<Vector<ValueStringPair>* > VectorOfValueStringVectors;

    VectorOfValueStringVectors::iterator end = m_tempSortingVectors.end();
    for (VectorOfValueStringVectors::iterator it = m_tempSortingVectors.begin(); it != end; ++it) {
        Vector<ValueStringPair>* tempSortingVector = *it;

        Vector<ValueStringPair>::iterator vectorEnd = tempSortingVector->end();
        for (Vector<ValueStringPair>::iterator vectorIt = tempSortingVector->begin(); vectorIt != vectorEnd; ++vectorIt)
            if (vectorIt->first)
                markStack.append(vectorIt->first);
        markStack.drain();
    }
}

void Heap::clearMarkBits()
{
    for (size_t i = 0; i < m_heap.usedBlocks; ++i)
        clearMarkBits(m_heap.blocks[i]);
}

void Heap::clearMarkBits(CollectorBlock* block)
{
    // allocate assumes that the last cell in every block is marked.
    block->marked.clearAll();
    block->marked.set(HeapConstants::cellsPerBlock - 1);
}

size_t Heap::markedCells(size_t startBlock, size_t startCell) const
{
    ASSERT(startBlock <= m_heap.usedBlocks);
    ASSERT(startCell < HeapConstants::cellsPerBlock);

    if (startBlock >= m_heap.usedBlocks)
        return 0;

    size_t result = 0;
    result += m_heap.blocks[startBlock]->marked.count(startCell);
    for (size_t i = startBlock + 1; i < m_heap.usedBlocks; ++i)
        result += m_heap.blocks[i]->marked.count();

    return result;
}

void Heap::sweep()
{
    ASSERT(m_heap.operationInProgress == NoOperation);
    if (m_heap.operationInProgress != NoOperation)
        CRASH();
    m_heap.operationInProgress = Collection;

#if !ENABLE(JSC_ZOMBIES)
    Structure* dummyMarkableCellStructure = m_globalData->dummyMarkableCellStructure.get();
#endif

    DeadObjectIterator it(m_heap, m_heap.nextBlock, m_heap.nextCell);
    DeadObjectIterator end(m_heap, m_heap.usedBlocks);
    for ( ; it != end; ++it) {
        JSCell* cell = *it;
#if ENABLE(JSC_ZOMBIES)
        if (!cell->isZombie()) {
            const ClassInfo* info = cell->classInfo();
            cell->~JSCell();
            new (cell) JSZombie(info, JSZombie::leakedZombieStructure());
            Heap::markCell(cell);
        }
#else
        cell->~JSCell();
        // Callers of sweep assume it's safe to mark any cell in the heap.
        new (cell) JSCell(dummyMarkableCellStructure);
#endif
    }

    m_heap.operationInProgress = NoOperation;
}

void Heap::markRoots()
{
#ifndef NDEBUG
    if (m_globalData->isSharedInstance()) {
        ASSERT(JSLock::lockCount() > 0);
        ASSERT(JSLock::currentThreadIsHoldingLock());
    }
#endif

    ASSERT(m_heap.operationInProgress == NoOperation);
    if (m_heap.operationInProgress != NoOperation)
        CRASH();

    m_heap.operationInProgress = Collection;

    MarkStack& markStack = m_globalData->markStack;

    // Reset mark bits.
    clearMarkBits();

    // Mark stack roots.
    markStackObjectsConservatively(markStack);
    m_globalData->interpreter->registerFile().markCallFrames(markStack, this);

    // Mark explicitly registered roots.
    markProtectedObjects(markStack);

    // Mark temporary vector for Array sorting
    markTempSortVectors(markStack);

    // Mark misc. other roots.
    if (m_markListSet && m_markListSet->size())
        MarkedArgumentBuffer::markLists(markStack, *m_markListSet);
    if (m_globalData->exception)
        markStack.append(m_globalData->exception);
    if (m_globalData->functionCodeBlockBeingReparsed)
        m_globalData->functionCodeBlockBeingReparsed->markAggregate(markStack);
    if (m_globalData->firstStringifierToMark)
        JSONObject::markStringifiers(markStack, m_globalData->firstStringifierToMark);

    // Mark the small strings cache last, since it will clear itself if nothing
    // else has marked it.
    m_globalData->smallStrings.markChildren(markStack);

    markStack.drain();
    markStack.compact();

    m_heap.operationInProgress = NoOperation;
}

size_t Heap::objectCount() const
{
    return m_heap.nextBlock * HeapConstants::cellsPerBlock // allocated full blocks
           + m_heap.nextCell // allocated cells in current block
           + markedCells(m_heap.nextBlock, m_heap.nextCell) // marked cells in remainder of m_heap
           - m_heap.usedBlocks; // 1 cell per block is a dummy sentinel
}

void Heap::addToStatistics(Heap::Statistics& statistics) const
{
    statistics.size += m_heap.usedBlocks * BLOCK_SIZE;
    statistics.free += m_heap.usedBlocks * BLOCK_SIZE - (objectCount() * HeapConstants::cellSize);
}

Heap::Statistics Heap::statistics() const
{
    Statistics statistics = { 0, 0 };
    addToStatistics(statistics);
    return statistics;
}

size_t Heap::globalObjectCount()
{
    size_t count = 0;
    if (JSGlobalObject* head = m_globalData->head) {
        JSGlobalObject* o = head;
        do {
            ++count;
            o = o->next();
        } while (o != head);
    }
    return count;
}

size_t Heap::protectedGlobalObjectCount()
{
    size_t count = 0;
    if (JSGlobalObject* head = m_globalData->head) {
        JSGlobalObject* o = head;
        do {
            if (m_protectedValues.contains(o))
                ++count;
            o = o->next();
        } while (o != head);
    }

    return count;
}

size_t Heap::protectedObjectCount()
{
    return m_protectedValues.size();
}

static const char* typeName(JSCell* cell)
{
    if (cell->isString())
        return "string";
#if USE(JSVALUE32)
    if (cell->isNumber())
        return "number";
#endif
    if (cell->isGetterSetter())
        return "Getter-Setter";
    if (cell->isAPIValueWrapper())
        return "API wrapper";
    if (cell->isPropertyNameIterator())
        return "For-in iterator";
    if (!cell->isObject())
        return "[empty cell]";
    const ClassInfo* info = cell->classInfo();
    return info ? info->className : "Object";
}

HashCountedSet<const char*>* Heap::protectedObjectTypeCounts()
{
    HashCountedSet<const char*>* counts = new HashCountedSet<const char*>;

    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
        counts->add(typeName(it->first));

    return counts;
}

HashCountedSet<const char*>* Heap::objectTypeCounts()
{
    HashCountedSet<const char*>* counts = new HashCountedSet<const char*>;

    LiveObjectIterator it = primaryHeapBegin();
    LiveObjectIterator heapEnd = primaryHeapEnd();
    for ( ; it != heapEnd; ++it)
        counts->add(typeName(*it));

    return counts;
}

bool Heap::isBusy()
{
    return m_heap.operationInProgress != NoOperation;
}

void Heap::reset()
{
    JAVASCRIPTCORE_GC_BEGIN();

    markRoots();

    JAVASCRIPTCORE_GC_MARKED();

    m_heap.nextCell = 0;
    m_heap.nextBlock = 0;
    m_heap.nextNumber = 0;
    m_heap.extraCost = 0;
#if ENABLE(JSC_ZOMBIES)
    sweep();
#endif
    resizeBlocks();

    JAVASCRIPTCORE_GC_END();
}

void Heap::collectAllGarbage()
{
    JAVASCRIPTCORE_GC_BEGIN();

    // If the last iteration through the heap deallocated blocks, we need
    // to clean up remaining garbage before marking. Otherwise, the conservative
    // marking mechanism might follow a pointer to unmapped memory.
    if (m_heap.didShrink)
        sweep();

    markRoots();

    JAVASCRIPTCORE_GC_MARKED();

    m_heap.nextCell = 0;
    m_heap.nextBlock = 0;
    m_heap.nextNumber = 0;
    m_heap.extraCost = 0;
    sweep();
    resizeBlocks();

    JAVASCRIPTCORE_GC_END();
}

LiveObjectIterator Heap::primaryHeapBegin()
{
    return LiveObjectIterator(m_heap, 0);
}

LiveObjectIterator Heap::primaryHeapEnd()
{
    return LiveObjectIterator(m_heap, m_heap.usedBlocks);
}

} // namespace JSC