// runtime/Collector.cpp (JavaScriptCore-521)
/*
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include "config.h"
#include "Collector.h"

#include "ArgList.h"
#include "CallFrame.h"
#include "CollectorHeapIterator.h"
#include "Interpreter.h"
#include "JSGlobalObject.h"
#include "JSLock.h"
#include "JSString.h"
#include "JSValue.h"
#include "Nodes.h"
#include "Tracing.h"
#include <algorithm>
#include <setjmp.h>
#include <stdlib.h>
#include <wtf/FastMalloc.h>
#include <wtf/HashCountedSet.h>
#include <wtf/UnusedParam.h>

#if PLATFORM(DARWIN)

#include <mach/mach_port.h>
#include <mach/mach_init.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <mach/vm_map.h>

#elif PLATFORM(WIN_OS)

#include <windows.h>

#elif PLATFORM(UNIX)

#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

#if PLATFORM(SOLARIS)
#include <thread.h>
#endif

#if PLATFORM(OPENBSD)
#include <pthread.h>
#endif

#if HAVE(PTHREAD_NP_H)
#include <pthread_np.h>
#endif

#endif

#define DEBUG_COLLECTOR 0
#define COLLECT_ON_EVERY_ALLOCATION 0

using std::max;

namespace JSC {

// tunable parameters

const size_t SPARE_EMPTY_BLOCKS = 2;
const size_t GROWTH_FACTOR = 2;
const size_t LOW_WATER_FACTOR = 4;
const size_t ALLOCATIONS_PER_COLLECTION = 4000;
// This value has to be a macro to be used in max() without introducing
// a PIC branch in Mach-O binaries, see <rdar://problem/5971391>.
#define MIN_ARRAY_SIZE (static_cast<size_t>(14))

static void freeHeap(CollectorHeap*);

#if ENABLE(JSC_MULTIPLE_THREADS)

#if PLATFORM(DARWIN)
typedef mach_port_t PlatformThread;
#elif PLATFORM(WIN_OS)
struct PlatformThread {
    PlatformThread(DWORD _id, HANDLE _handle) : id(_id), handle(_handle) {}
    DWORD id;
    HANDLE handle;
};
#endif

class Heap::Thread {
public:
    Thread(pthread_t pthread, const PlatformThread& platThread, void* base)
        : posixThread(pthread)
        , platformThread(platThread)
        , stackBase(base)
    {
    }

    Thread* next;
    pthread_t posixThread;
    PlatformThread platformThread;
    void* stackBase;
};

#endif

Heap::Heap(JSGlobalData* globalData)
    : m_markListSet(0)
#if ENABLE(JSC_MULTIPLE_THREADS)
    , m_registeredThreads(0)
    , m_currentThreadRegistrar(0)
#endif
    , m_globalData(globalData)
{
    ASSERT(globalData);

    memset(&primaryHeap, 0, sizeof(CollectorHeap));
    memset(&numberHeap, 0, sizeof(CollectorHeap));
}

Heap::~Heap()
{
    // The destroy function must already have been called, so assert this.
    ASSERT(!m_globalData);
}

void Heap::destroy()
{
    JSLock lock(false);

    if (!m_globalData)
        return;

    // The global object is not GC protected at this point, so sweeping may delete it
    // (and thus the global data) before other objects that may use the global data.
    RefPtr<JSGlobalData> protect(m_globalData);

    delete m_markListSet;
    m_markListSet = 0;

    sweep<PrimaryHeap>();
    // No need to sweep number heap, because the JSNumber destructor doesn't do anything.

    ASSERT(!primaryHeap.numLiveObjects);

    freeHeap(&primaryHeap);
    freeHeap(&numberHeap);

#if ENABLE(JSC_MULTIPLE_THREADS)
    if (m_currentThreadRegistrar) {
        int error = pthread_key_delete(m_currentThreadRegistrar);
        ASSERT_UNUSED(error, !error);
    }

    MutexLocker registeredThreadsLock(m_registeredThreadsMutex);
    for (Heap::Thread* t = m_registeredThreads; t;) {
        Heap::Thread* next = t->next;
        delete t;
        t = next;
    }
#endif

    m_globalData = 0;
}

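// Allocate one BLOCK_SIZE-sized, BLOCK_SIZE-aligned, zero-filled chunk of memory
// and tag it with the heap type. The alignment matters: markConservatively()
// recovers a cell's block by masking the cell address with BLOCK_OFFSET_MASK.
// On platforms with no aligned-allocation primitive we over-allocate with mmap,
// then trim the misaligned head and tail back off.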
template <HeapType heapType>
static NEVER_INLINE CollectorBlock* allocateBlock()
{
#if PLATFORM(DARWIN)
    vm_address_t address = 0;
    // FIXME: tag the region as a JavaScriptCore heap when we get a registered VM tag: <rdar://problem/6054788>.
    vm_map(current_task(), &address, BLOCK_SIZE, BLOCK_OFFSET_MASK, VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
#elif PLATFORM(SYMBIAN)
    // Symbian has no virtual memory mapping API, so fall back to fastMalloc.
    void* address = fastMalloc(BLOCK_SIZE);
    memset(reinterpret_cast<void*>(address), 0, BLOCK_SIZE);
#elif PLATFORM(WIN_OS)
    // Windows virtual address granularity is naturally 64KB.
    LPVOID address = VirtualAlloc(NULL, BLOCK_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#elif HAVE(POSIX_MEMALIGN)
    void* address;
    posix_memalign(&address, BLOCK_SIZE, BLOCK_SIZE);
    memset(address, 0, BLOCK_SIZE);
#else

#if ENABLE(JSC_MULTIPLE_THREADS)
#error Need to initialize pagesize safely.
#endif
    static size_t pagesize = getpagesize();

    size_t extra = 0;
    if (BLOCK_SIZE > pagesize)
        extra = BLOCK_SIZE - pagesize;

    void* mmapResult = mmap(NULL, BLOCK_SIZE + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
    uintptr_t address = reinterpret_cast<uintptr_t>(mmapResult);

    size_t adjust = 0;
    if ((address & BLOCK_OFFSET_MASK) != 0)
        adjust = BLOCK_SIZE - (address & BLOCK_OFFSET_MASK);

    if (adjust > 0)
        munmap(reinterpret_cast<char*>(address), adjust);

    if (adjust < extra)
        munmap(reinterpret_cast<char*>(address + adjust + BLOCK_SIZE), extra - adjust);

    address += adjust;
    memset(reinterpret_cast<void*>(address), 0, BLOCK_SIZE);
#endif
    reinterpret_cast<CollectorBlock*>(address)->type = heapType;
    return reinterpret_cast<CollectorBlock*>(address);
}

static void freeBlock(CollectorBlock* block)
{
#if PLATFORM(DARWIN)
    vm_deallocate(current_task(), reinterpret_cast<vm_address_t>(block), BLOCK_SIZE);
#elif PLATFORM(SYMBIAN)
    fastFree(block);
#elif PLATFORM(WIN_OS)
    VirtualFree(block, 0, MEM_RELEASE);
#elif HAVE(POSIX_MEMALIGN)
    free(block);
#else
    munmap(reinterpret_cast<char*>(block), BLOCK_SIZE);
#endif
}

static void freeHeap(CollectorHeap* heap)
{
    for (size_t i = 0; i < heap->usedBlocks; ++i)
        if (heap->blocks[i])
            freeBlock(heap->blocks[i]);
    fastFree(heap->blocks);
    memset(heap, 0, sizeof(CollectorHeap));
}

void Heap::recordExtraCost(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.
    // NOTE: We charge the primary heap unconditionally, because JSNumber cells never
    // carry extra cost.

    primaryHeap.extraCost += cost;
}

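// Allocate a cell of size s from the given heap. The fast path pops the head of
// a block's free list, starting the search at firstBlockWithPossibleSpace. If
// every block is full, we either run a collection (once enough allocations or
// extra cost have accumulated since the last one) or append a fresh block.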
template <HeapType heapType> ALWAYS_INLINE void* Heap::heapAllocate(size_t s)
{
    typedef typename HeapConstants<heapType>::Block Block;
    typedef typename HeapConstants<heapType>::Cell Cell;

    CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;
    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());
    ASSERT_UNUSED(s, s <= HeapConstants<heapType>::cellSize);

    ASSERT(heap.operationInProgress == NoOperation);
    ASSERT(heapType == PrimaryHeap || heap.extraCost == 0);
    // FIXME: If another global variable access here doesn't hurt performance
    // too much, we could CRASH() in NDEBUG builds, which could help ensure we
    // don't spend any time debugging cases where we allocate inside an object's
    // deallocation code.

#if COLLECT_ON_EVERY_ALLOCATION
    collect();
#endif

    size_t numLiveObjects = heap.numLiveObjects;
    size_t usedBlocks = heap.usedBlocks;
    size_t i = heap.firstBlockWithPossibleSpace;

    // If we have a huge amount of extra cost, we'll try to collect even if we
    // still have free cells left.
    if (heapType == PrimaryHeap && heap.extraCost > ALLOCATIONS_PER_COLLECTION) {
        size_t numLiveObjectsAtLastCollect = heap.numLiveObjectsAtLastCollect;
        size_t numNewObjects = numLiveObjects - numLiveObjectsAtLastCollect;
        const size_t newCost = numNewObjects + heap.extraCost;
        if (newCost >= ALLOCATIONS_PER_COLLECTION && newCost >= numLiveObjectsAtLastCollect)
            goto collect;
    }

    ASSERT(heap.operationInProgress == NoOperation);
#ifndef NDEBUG
    // FIXME: Consider doing this in NDEBUG builds too (see comment above).
    heap.operationInProgress = Allocation;
#endif

scan:
    Block* targetBlock;
    size_t targetBlockUsedCells;
    if (i != usedBlocks) {
        targetBlock = reinterpret_cast<Block*>(heap.blocks[i]);
        targetBlockUsedCells = targetBlock->usedCells;
        ASSERT(targetBlockUsedCells <= HeapConstants<heapType>::cellsPerBlock);
        while (targetBlockUsedCells == HeapConstants<heapType>::cellsPerBlock) {
            if (++i == usedBlocks)
                goto collect;
            targetBlock = reinterpret_cast<Block*>(heap.blocks[i]);
            targetBlockUsedCells = targetBlock->usedCells;
            ASSERT(targetBlockUsedCells <= HeapConstants<heapType>::cellsPerBlock);
        }
        heap.firstBlockWithPossibleSpace = i;
    } else {

collect:
        size_t numLiveObjectsAtLastCollect = heap.numLiveObjectsAtLastCollect;
        size_t numNewObjects = numLiveObjects - numLiveObjectsAtLastCollect;
        const size_t newCost = numNewObjects + heap.extraCost;

        if (newCost >= ALLOCATIONS_PER_COLLECTION && newCost >= numLiveObjectsAtLastCollect) {
#ifndef NDEBUG
            heap.operationInProgress = NoOperation;
#endif
            bool collected = collect();
#ifndef NDEBUG
            heap.operationInProgress = Allocation;
#endif
            if (collected) {
                numLiveObjects = heap.numLiveObjects;
                usedBlocks = heap.usedBlocks;
                i = heap.firstBlockWithPossibleSpace;
                goto scan;
            }
        }

        // We didn't find a block with a free cell, and GC didn't reclaim anything,
        // so we need to allocate a new block.
        size_t numBlocks = heap.numBlocks;
        if (usedBlocks == numBlocks) {
            static const size_t maxNumBlocks = ULONG_MAX / sizeof(CollectorBlock*) / GROWTH_FACTOR;
            if (numBlocks > maxNumBlocks)
                CRASH();
            numBlocks = max(MIN_ARRAY_SIZE, numBlocks * GROWTH_FACTOR);
            heap.numBlocks = numBlocks;
            heap.blocks = static_cast<CollectorBlock**>(fastRealloc(heap.blocks, numBlocks * sizeof(CollectorBlock*)));
        }

        targetBlock = reinterpret_cast<Block*>(allocateBlock<heapType>());
        targetBlock->freeList = targetBlock->cells;
        targetBlock->heap = this;
        targetBlockUsedCells = 0;
        heap.blocks[usedBlocks] = reinterpret_cast<CollectorBlock*>(targetBlock);
        heap.usedBlocks = usedBlocks + 1;
        heap.firstBlockWithPossibleSpace = usedBlocks;
    }

    // Find a free spot in the block and detach it from the free list.
    Cell* newCell = targetBlock->freeList;

    // The "next" field is a cell offset -- 0 means next cell, so a zeroed block is already initialized.
    targetBlock->freeList = (newCell + 1) + newCell->u.freeCell.next;

    targetBlock->usedCells = static_cast<uint32_t>(targetBlockUsedCells + 1);
    heap.numLiveObjects = numLiveObjects + 1;

#ifndef NDEBUG
    // FIXME: Consider doing this in NDEBUG builds too (see comment above).
    heap.operationInProgress = NoOperation;
#endif

    return newCell;
}

void* Heap::allocate(size_t s)
{
    return heapAllocate<PrimaryHeap>(s);
}

void* Heap::allocateNumber(size_t s)
{
    return heapAllocate<NumberHeap>(s);
}

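// Return the highest address of the current thread's stack. Conservative marking
// scans everything between an approximate stack pointer and this base.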
static inline void* currentThreadStackBase()
{
#if PLATFORM(DARWIN)
    pthread_t thread = pthread_self();
    return pthread_get_stackaddr_np(thread);
#elif PLATFORM(WIN_OS) && PLATFORM(X86) && COMPILER(MSVC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    NT_TIB* pTib;
    __asm {
        MOV EAX, FS:[18h]
        MOV pTib, EAX
    }
    return static_cast<void*>(pTib->StackBase);
#elif PLATFORM(WIN_OS) && PLATFORM(X86_64) && COMPILER(MSVC)
    PNT_TIB64 pTib = reinterpret_cast<PNT_TIB64>(NtCurrentTeb());
    return reinterpret_cast<void*>(pTib->StackBase);
#elif PLATFORM(WIN_OS) && PLATFORM(X86) && COMPILER(GCC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    NT_TIB* pTib;
    asm ( "movl %%fs:0x18, %0\n"
          : "=r" (pTib)
        );
    return static_cast<void*>(pTib->StackBase);
#elif PLATFORM(SOLARIS)
    stack_t s;
    thr_stksegment(&s);
    return s.ss_sp;
#elif PLATFORM(OPENBSD)
    pthread_t thread = pthread_self();
    stack_t stack;
    pthread_stackseg_np(thread, &stack);
    return stack.ss_sp;
#elif PLATFORM(UNIX)
    static void* stackBase = 0;
    static size_t stackSize = 0;
    static pthread_t stackThread;
    pthread_t thread = pthread_self();
    if (stackBase == 0 || thread != stackThread) {
        pthread_attr_t sattr;
        pthread_attr_init(&sattr);
#if HAVE(PTHREAD_NP_H) || PLATFORM(NETBSD)
        // e.g. on FreeBSD 5.4, neundorf@kde.org
        pthread_attr_get_np(thread, &sattr);
#else
        // FIXME: this function is non-portable; other POSIX systems may have different np alternatives
        pthread_getattr_np(thread, &sattr);
#endif
        int rc = pthread_attr_getstack(&sattr, &stackBase, &stackSize);
        (void)rc; // FIXME: Deal with error code somehow? Seems fatal.
        ASSERT(stackBase);
        pthread_attr_destroy(&sattr);
        stackThread = thread;
    }
    return static_cast<char*>(stackBase) + stackSize;
#elif PLATFORM(SYMBIAN)
    static void* stackBase = 0;
    if (stackBase == 0) {
        TThreadStackInfo info;
        RThread thread;
        thread.StackInfo(info);
        stackBase = (void*)info.iBase;
    }
    return (void*)stackBase;
#else
#error Need a way to get the stack base on this platform
#endif
}

#if ENABLE(JSC_MULTIPLE_THREADS)

static inline PlatformThread getCurrentPlatformThread()
{
#if PLATFORM(DARWIN)
    return pthread_mach_thread_np(pthread_self());
#elif PLATFORM(WIN_OS)
    HANDLE threadHandle = pthread_getw32threadhandle_np(pthread_self());
    return PlatformThread(GetCurrentThreadId(), threadHandle);
#endif
}

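// Create a pthread key whose destructor runs at thread exit, so registered
// threads unregister themselves automatically. Until this is called,
// registerThread() is a no-op and only the creating thread may use the heap.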
void Heap::makeUsableFromMultipleThreads()
{
    if (m_currentThreadRegistrar)
        return;

    int error = pthread_key_create(&m_currentThreadRegistrar, unregisterThread);
    if (error)
        CRASH();
}

void Heap::registerThread()
{
    if (!m_currentThreadRegistrar || pthread_getspecific(m_currentThreadRegistrar))
        return;

    pthread_setspecific(m_currentThreadRegistrar, this);
    Heap::Thread* thread = new Heap::Thread(pthread_self(), getCurrentPlatformThread(), currentThreadStackBase());

    MutexLocker lock(m_registeredThreadsMutex);

    thread->next = m_registeredThreads;
    m_registeredThreads = thread;
}

void Heap::unregisterThread(void* p)
{
    if (p)
        static_cast<Heap*>(p)->unregisterThread();
}

void Heap::unregisterThread()
{
    pthread_t currentPosixThread = pthread_self();

    MutexLocker lock(m_registeredThreadsMutex);

    if (pthread_equal(currentPosixThread, m_registeredThreads->posixThread)) {
        Thread* t = m_registeredThreads;
        m_registeredThreads = m_registeredThreads->next;
        delete t;
    } else {
        Heap::Thread* last = m_registeredThreads;
        Heap::Thread* t;
        for (t = m_registeredThreads->next; t; t = t->next) {
            if (pthread_equal(t->posixThread, currentPosixThread)) {
                last->next = t->next;
                break;
            }
            last = t;
        }
        ASSERT(t); // If t is NULL, we never found ourselves in the list.
        delete t;
    }
}

#else // ENABLE(JSC_MULTIPLE_THREADS)

void Heap::registerThread()
{
}

#endif

#define IS_POINTER_ALIGNED(p) (((intptr_t)(p) & (sizeof(char*) - 1)) == 0)

// Cell size needs to be a power of two for this to be valid.
#define IS_HALF_CELL_ALIGNED(p) (((intptr_t)(p) & (CELL_MASK >> 1)) == 0)

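// Conservative marking: treat every pointer-sized, suitably aligned word in
// [start, end) as a possible JSCell*. A word marks a cell only if it points
// into a block we actually allocated, at an offset that lands on a live cell,
// so a false positive can keep garbage alive but can never corrupt memory.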
void Heap::markConservatively(void* start, void* end)
{
    if (start > end) {
        void* tmp = start;
        start = end;
        end = tmp;
    }

    ASSERT((static_cast<char*>(end) - static_cast<char*>(start)) < 0x1000000);
    ASSERT(IS_POINTER_ALIGNED(start));
    ASSERT(IS_POINTER_ALIGNED(end));

    char** p = static_cast<char**>(start);
    char** e = static_cast<char**>(end);

    size_t usedPrimaryBlocks = primaryHeap.usedBlocks;
    size_t usedNumberBlocks = numberHeap.usedBlocks;
    CollectorBlock** primaryBlocks = primaryHeap.blocks;
    CollectorBlock** numberBlocks = numberHeap.blocks;

    const size_t lastCellOffset = sizeof(CollectorCell) * (CELLS_PER_BLOCK - 1);

    while (p != e) {
        char* x = *p++;
        if (IS_HALF_CELL_ALIGNED(x) && x) {
            uintptr_t xAsBits = reinterpret_cast<uintptr_t>(x);
            xAsBits &= CELL_ALIGN_MASK;
            uintptr_t offset = xAsBits & BLOCK_OFFSET_MASK;
            CollectorBlock* blockAddr = reinterpret_cast<CollectorBlock*>(xAsBits - offset);
            // Mark the number heap; we can mark these cells directly to avoid the virtual call cost.
            for (size_t block = 0; block < usedNumberBlocks; block++) {
                if ((numberBlocks[block] == blockAddr) & (offset <= lastCellOffset)) {
                    Heap::markCell(reinterpret_cast<JSCell*>(xAsBits));
                    goto endMarkLoop;
                }
            }

            // Mark the primary heap.
            for (size_t block = 0; block < usedPrimaryBlocks; block++) {
                if ((primaryBlocks[block] == blockAddr) & (offset <= lastCellOffset)) {
                    if (reinterpret_cast<CollectorCell*>(xAsBits)->u.freeCell.zeroIfFree != 0) {
                        JSCell* imp = reinterpret_cast<JSCell*>(xAsBits);
                        if (!imp->marked())
                            imp->mark();
                    }
                    break;
                }
            }
        endMarkLoop:
            ;
        }
    }
}

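// Kept out of line (NEVER_INLINE) so that `dummy` lives in a frame below the
// caller's jmp_buf; the scan from &dummy up to the stack base then covers the
// registers that setjmp spilled in markCurrentThreadConservatively().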
void NEVER_INLINE Heap::markCurrentThreadConservativelyInternal()
{
    void* dummy;
    void* stackPointer = &dummy;
    void* stackBase = currentThreadStackBase();
    markConservatively(stackPointer, stackBase);
}

void Heap::markCurrentThreadConservatively()
{
    // setjmp forces the callee-saved registers onto the stack (into the jmp_buf),
    // where the conservative scan below can see them.
    jmp_buf registers;
#if COMPILER(MSVC)
#pragma warning(push)
#pragma warning(disable: 4611)
#endif
    setjmp(registers);
#if COMPILER(MSVC)
#pragma warning(pop)
#endif

    markCurrentThreadConservativelyInternal();
}

#if ENABLE(JSC_MULTIPLE_THREADS)

static inline void suspendThread(const PlatformThread& platformThread)
{
#if PLATFORM(DARWIN)
    thread_suspend(platformThread);
#elif PLATFORM(WIN_OS)
    SuspendThread(platformThread.handle);
#else
#error Need a way to suspend threads on this platform
#endif
}

static inline void resumeThread(const PlatformThread& platformThread)
{
#if PLATFORM(DARWIN)
    thread_resume(platformThread);
#elif PLATFORM(WIN_OS)
    ResumeThread(platformThread.handle);
#else
#error Need a way to resume threads on this platform
#endif
}

typedef unsigned long usword_t; // word size, assumed to be either 32 or 64 bit

#if PLATFORM(DARWIN)

#if PLATFORM(X86)
typedef i386_thread_state_t PlatformThreadRegisters;
#elif PLATFORM(X86_64)
typedef x86_thread_state64_t PlatformThreadRegisters;
#elif PLATFORM(PPC)
typedef ppc_thread_state_t PlatformThreadRegisters;
#elif PLATFORM(PPC64)
typedef ppc_thread_state64_t PlatformThreadRegisters;
#elif PLATFORM(ARM)
typedef arm_thread_state_t PlatformThreadRegisters;
#else
#error Unknown Architecture
#endif

#elif PLATFORM(WIN_OS) && PLATFORM(X86)
typedef CONTEXT PlatformThreadRegisters;
#else
#error Need a thread register struct for this platform
#endif

static size_t getPlatformThreadRegisters(const PlatformThread& platformThread, PlatformThreadRegisters& regs)
{
#if PLATFORM(DARWIN)

#if PLATFORM(X86)
    unsigned user_count = sizeof(regs) / sizeof(int);
    thread_state_flavor_t flavor = i386_THREAD_STATE;
#elif PLATFORM(X86_64)
    unsigned user_count = x86_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = x86_THREAD_STATE64;
#elif PLATFORM(PPC)
    unsigned user_count = PPC_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE;
#elif PLATFORM(PPC64)
    unsigned user_count = PPC_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE64;
#elif PLATFORM(ARM)
    unsigned user_count = ARM_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = ARM_THREAD_STATE;
#else
#error Unknown Architecture
#endif

    kern_return_t result = thread_get_state(platformThread, flavor, (thread_state_t)&regs, &user_count);
    if (result != KERN_SUCCESS) {
        WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION,
            "JavaScript garbage collection failed because thread_get_state returned an error (%d). This is probably the result of running inside Rosetta, which is not supported.", result);
        CRASH();
    }
    return user_count * sizeof(usword_t);
// end PLATFORM(DARWIN)

#elif PLATFORM(WIN_OS) && PLATFORM(X86)
    regs.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL | CONTEXT_SEGMENTS;
    GetThreadContext(platformThread.handle, &regs);
    return sizeof(CONTEXT);
#else
#error Need a way to get thread registers on this platform
#endif
}

static inline void* otherThreadStackPointer(const PlatformThreadRegisters& regs)
{
#if PLATFORM(DARWIN)

#if __DARWIN_UNIX03

#if PLATFORM(X86)
    return reinterpret_cast<void*>(regs.__esp);
#elif PLATFORM(X86_64)
    return reinterpret_cast<void*>(regs.__rsp);
#elif PLATFORM(PPC) || PLATFORM(PPC64)
    return reinterpret_cast<void*>(regs.__r1);
#elif PLATFORM(ARM)
    return reinterpret_cast<void*>(regs.__sp);
#else
#error Unknown Architecture
#endif

#else // !__DARWIN_UNIX03

#if PLATFORM(X86)
    return reinterpret_cast<void*>(regs.esp);
#elif PLATFORM(X86_64)
    return reinterpret_cast<void*>(regs.rsp);
#elif PLATFORM(PPC) || PLATFORM(PPC64)
    return reinterpret_cast<void*>(regs.r1);
#else
#error Unknown Architecture
#endif

#endif // __DARWIN_UNIX03

// end PLATFORM(DARWIN)
#elif PLATFORM(X86) && PLATFORM(WIN_OS)
    return reinterpret_cast<void*>((uintptr_t) regs.Esp);
#else
#error Need a way to get the stack pointer for another thread on this platform
#endif
}

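// Suspend a registered thread, conservatively mark its register state and its
// stack, then resume it. Nothing in here may allocate: the suspended thread
// could be holding the malloc lock (see markStackObjectsConservatively()).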
void Heap::markOtherThreadConservatively(Thread* thread)
{
    suspendThread(thread->platformThread);

    PlatformThreadRegisters regs;
    size_t regSize = getPlatformThreadRegisters(thread->platformThread, regs);

    // Mark the thread's registers.
    markConservatively(static_cast<void*>(&regs), static_cast<void*>(reinterpret_cast<char*>(&regs) + regSize));

    void* stackPointer = otherThreadStackPointer(regs);
    markConservatively(stackPointer, thread->stackBase);

    resumeThread(thread->platformThread);
}

#endif

void Heap::markStackObjectsConservatively()
{
    markCurrentThreadConservatively();

#if ENABLE(JSC_MULTIPLE_THREADS)

    if (m_currentThreadRegistrar) {

        MutexLocker lock(m_registeredThreadsMutex);

#ifndef NDEBUG
        // Forbid malloc during the mark phase. Marking a thread suspends it, so
        // a malloc inside mark() would risk a deadlock with a thread that had been
        // suspended while holding the malloc lock.
        fastMallocForbid();
#endif
        // It is safe to access the registeredThreads list, because we earlier asserted that locks are being held,
        // and since this is a shared heap, they are real locks.
        for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
            if (!pthread_equal(thread->posixThread, pthread_self()))
                markOtherThreadConservatively(thread);
        }
#ifndef NDEBUG
        fastMallocAllow();
#endif
    }
#endif
}

void Heap::setGCProtectNeedsLocking()
{
    // Most clients do not need to call this, with the notable exception of WebCore.
    // Clients that use a shared heap have JSLock protection, while others are supposed
    // to do explicit locking. WebCore violates this contract in Database code,
    // which calls gcUnprotect from a secondary thread.
    if (!m_protectedValuesMutex)
        m_protectedValuesMutex.set(new Mutex);
}

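// Pin a value so the collector treats it as a root even when nothing on any
// stack references it. Protection is counted, so nested protect/unprotect pairs
// balance; most callers go through the gcProtect()/gcUnprotect() helpers rather
// than calling these directly.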
void Heap::protect(JSValuePtr k)
{
    ASSERT(k);
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance);

    if (!k.isCell())
        return;

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    m_protectedValues.add(k.asCell());

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();
}

void Heap::unprotect(JSValuePtr k)
{
    ASSERT(k);
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance);

    if (!k.isCell())
        return;

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    m_protectedValues.remove(k.asCell());

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();
}

Heap* Heap::heap(JSValuePtr v)
{
    if (!v.isCell())
        return 0;
    return Heap::cellBlock(v.asCell())->heap;
}

void Heap::markProtectedObjects()
{
    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it) {
        JSCell* val = it->first;
        if (!val->marked())
            val->mark();
    }

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();
}

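// Sweep one heap: destroy every unmarked live cell, thread the reclaimed cells
// back onto the per-block free lists, clear the mark bits, and release surplus
// empty blocks (keeping up to SPARE_EMPTY_BLOCKS in reserve). Returns the
// number of objects still live.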
template <HeapType heapType> size_t Heap::sweep()
{
    typedef typename HeapConstants<heapType>::Block Block;
    typedef typename HeapConstants<heapType>::Cell Cell;

    // SWEEP: delete every unmarked live cell (garbage) and unmark everything else.
    CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;

    size_t emptyBlocks = 0;
    size_t numLiveObjects = heap.numLiveObjects;

    for (size_t block = 0; block < heap.usedBlocks; block++) {
        Block* curBlock = reinterpret_cast<Block*>(heap.blocks[block]);

        size_t usedCells = curBlock->usedCells;
        Cell* freeList = curBlock->freeList;

        if (usedCells == HeapConstants<heapType>::cellsPerBlock) {
            // Special case for a block where all cells are used -- testing indicates this happens often.
            for (size_t i = 0; i < HeapConstants<heapType>::cellsPerBlock; i++) {
                if (!curBlock->marked.get(i >> HeapConstants<heapType>::bitmapShift)) {
                    Cell* cell = curBlock->cells + i;

                    if (heapType != NumberHeap) {
                        JSCell* imp = reinterpret_cast<JSCell*>(cell);
                        // Special case for an allocated but uninitialized object.
                        // (We don't need this check earlier because nothing prior to this point
                        // assumes the object has a valid vptr.)
                        if (cell->u.freeCell.zeroIfFree == 0)
                            continue;

                        imp->~JSCell();
                    }

                    --usedCells;
                    --numLiveObjects;

                    // Put the cell on the free list.
                    cell->u.freeCell.zeroIfFree = 0;
                    cell->u.freeCell.next = freeList - (cell + 1);
                    freeList = cell;
                }
            }
        } else {
            size_t minimumCellsToProcess = usedCells;
            for (size_t i = 0; (i < minimumCellsToProcess) & (i < HeapConstants<heapType>::cellsPerBlock); i++) {
                Cell* cell = curBlock->cells + i;
                if (cell->u.freeCell.zeroIfFree == 0) {
                    ++minimumCellsToProcess;
                } else {
                    if (!curBlock->marked.get(i >> HeapConstants<heapType>::bitmapShift)) {
                        if (heapType != NumberHeap) {
                            JSCell* imp = reinterpret_cast<JSCell*>(cell);
                            imp->~JSCell();
                        }
                        --usedCells;
                        --numLiveObjects;

                        // Put the cell on the free list.
                        cell->u.freeCell.zeroIfFree = 0;
                        cell->u.freeCell.next = freeList - (cell + 1);
                        freeList = cell;
                    }
                }
            }
        }

        curBlock->usedCells = static_cast<uint32_t>(usedCells);
        curBlock->freeList = freeList;
        curBlock->marked.clearAll();

        if (usedCells == 0) {
            emptyBlocks++;
            if (emptyBlocks > SPARE_EMPTY_BLOCKS) {
#if !DEBUG_COLLECTOR
                freeBlock(reinterpret_cast<CollectorBlock*>(curBlock));
#endif
                // Swap with the last block so we compact as we go.
                heap.blocks[block] = heap.blocks[heap.usedBlocks - 1];
                heap.usedBlocks--;
                block--; // Don't move forward a step in this case.

                if (heap.numBlocks > MIN_ARRAY_SIZE && heap.usedBlocks < heap.numBlocks / LOW_WATER_FACTOR) {
                    heap.numBlocks = heap.numBlocks / GROWTH_FACTOR;
                    heap.blocks = static_cast<CollectorBlock**>(fastRealloc(heap.blocks, heap.numBlocks * sizeof(CollectorBlock*)));
                }
            }
        }
    }

    if (heap.numLiveObjects != numLiveObjects)
        heap.firstBlockWithPossibleSpace = 0;

    heap.numLiveObjects = numLiveObjects;
    heap.numLiveObjectsAtLastCollect = numLiveObjects;
    heap.extraCost = 0;
    return numLiveObjects;
}

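// Run a full mark-sweep collection: mark everything reachable from the roots
// (thread stacks and registers, protected values, mark lists, the pending
// exception, the register file, small strings), then sweep both heaps.
// Returns true if any objects were reclaimed.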
bool Heap::collect()
{
#ifndef NDEBUG
    if (m_globalData->isSharedInstance) {
        ASSERT(JSLock::lockCount() > 0);
        ASSERT(JSLock::currentThreadIsHoldingLock());
    }
#endif

    ASSERT((primaryHeap.operationInProgress == NoOperation) | (numberHeap.operationInProgress == NoOperation));
    if ((primaryHeap.operationInProgress != NoOperation) | (numberHeap.operationInProgress != NoOperation))
        CRASH();

    JAVASCRIPTCORE_GC_BEGIN();
    primaryHeap.operationInProgress = Collection;
    numberHeap.operationInProgress = Collection;

    // MARK: first mark all referenced objects recursively starting out from the set of root objects

    markStackObjectsConservatively();
    markProtectedObjects();
    if (m_markListSet && m_markListSet->size())
        ArgList::markLists(*m_markListSet);
    if (m_globalData->exception && !m_globalData->exception.marked())
        m_globalData->exception.mark();
    m_globalData->interpreter->registerFile().markCallFrames(this);
    m_globalData->smallStrings.mark();
    if (m_globalData->scopeNodeBeingReparsed)
        m_globalData->scopeNodeBeingReparsed->mark();

    JAVASCRIPTCORE_GC_MARKED();

    size_t originalLiveObjects = primaryHeap.numLiveObjects + numberHeap.numLiveObjects;
    size_t numLiveObjects = sweep<PrimaryHeap>();
    numLiveObjects += sweep<NumberHeap>();

    primaryHeap.operationInProgress = NoOperation;
    numberHeap.operationInProgress = NoOperation;
    JAVASCRIPTCORE_GC_END(originalLiveObjects, numLiveObjects);

    return numLiveObjects < originalLiveObjects;
}

size_t Heap::objectCount()
{
    return primaryHeap.numLiveObjects + numberHeap.numLiveObjects - m_globalData->smallStrings.count();
}

template <HeapType heapType>
static void addToStatistics(Heap::Statistics& statistics, const CollectorHeap& heap)
{
    typedef HeapConstants<heapType> HC;
    for (size_t i = 0; i < heap.usedBlocks; ++i) {
        if (heap.blocks[i]) {
            statistics.size += BLOCK_SIZE;
            statistics.free += (HC::cellsPerBlock - heap.blocks[i]->usedCells) * HC::cellSize;
        }
    }
}

Heap::Statistics Heap::statistics() const
{
    Statistics statistics = { 0, 0 };
    JSC::addToStatistics<PrimaryHeap>(statistics, primaryHeap);
    JSC::addToStatistics<NumberHeap>(statistics, numberHeap);
    return statistics;
}

size_t Heap::globalObjectCount()
{
    size_t count = 0;
    if (JSGlobalObject* head = m_globalData->head) {
        JSGlobalObject* o = head;
        do {
            ++count;
            o = o->next();
        } while (o != head);
    }
    return count;
}

size_t Heap::protectedGlobalObjectCount()
{
    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    size_t count = 0;
    if (JSGlobalObject* head = m_globalData->head) {
        JSGlobalObject* o = head;
        do {
            if (m_protectedValues.contains(o))
                ++count;
            o = o->next();
        } while (o != head);
    }

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();

    return count;
}

size_t Heap::protectedObjectCount()
{
    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    size_t result = m_protectedValues.size();

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();

    return result;
}

static const char* typeName(JSCell* cell)
{
    if (cell->isString())
        return "string";
    if (cell->isNumber())
        return "number";
    if (cell->isGetterSetter())
        return "gettersetter";
    ASSERT(cell->isObject());
    const ClassInfo* info = static_cast<JSObject*>(cell)->classInfo();
    return info ? info->className : "Object";
}

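// Build a histogram of protected-object class names, useful when hunting
// gcProtect leaks. The caller owns the returned set and must delete it.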
HashCountedSet<const char*>* Heap::protectedObjectTypeCounts()
{
    HashCountedSet<const char*>* counts = new HashCountedSet<const char*>;

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
        counts->add(typeName(it->first));

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();

    return counts;
}

bool Heap::isBusy()
{
    return (primaryHeap.operationInProgress != NoOperation) | (numberHeap.operationInProgress != NoOperation);
}

Heap::iterator Heap::primaryHeapBegin()
{
    return iterator(primaryHeap.blocks, primaryHeap.blocks + primaryHeap.usedBlocks);
}

Heap::iterator Heap::primaryHeapEnd()
{
    return iterator(primaryHeap.blocks + primaryHeap.usedBlocks, primaryHeap.blocks + primaryHeap.usedBlocks);
}

} // namespace JSC