// -*- mode: c++; c-basic-offset: 4 -*-
/*
 * Copyright (C) 2003, 2004, 2005, 2006, 2007 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include "config.h"
#include "collector.h"

#include "ExecState.h"
#include "JSGlobalObject.h"
#include "internal.h"
#include "list.h"
#include "value.h"
#include "JSLockC.h"
#include <algorithm>
#include <setjmp.h>
#include <stdlib.h>
#include <wtf/FastMalloc.h>
#include <wtf/HashCountedSet.h>
#include <wtf/UnusedParam.h>

#if USE(MULTIPLE_THREADS)
#include <pthread.h>
#endif

#if PLATFORM(DARWIN)

#include <mach/mach_port.h>
#include <mach/mach_init.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <mach/vm_map.h>

#include "CollectorHeapIntrospector.h"

#elif PLATFORM(WIN_OS)

#include <windows.h>

#elif PLATFORM(UNIX)

#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

#if PLATFORM(SOLARIS)
#include <thread.h>
#endif

#if HAVE(PTHREAD_NP_H)
#include <pthread_np.h>
#else
#include <pthread.h>
#endif

#endif

#define DEBUG_COLLECTOR 0

using std::max;

namespace KJS {

// tunable parameters

const size_t SPARE_EMPTY_BLOCKS = 2;
const size_t MIN_ARRAY_SIZE = 14;
const size_t GROWTH_FACTOR = 2;
const size_t LOW_WATER_FACTOR = 4;
const size_t ALLOCATIONS_PER_COLLECTION = 4000;

enum OperationInProgress { NoOperation, Allocation, Collection };

struct CollectorHeap {
    CollectorBlock** blocks;
    size_t numBlocks;
    size_t usedBlocks;
    size_t firstBlockWithPossibleSpace;

    size_t numLiveObjects;
    size_t numLiveObjectsAtLastCollect;
    size_t extraCost;

    OperationInProgress operationInProgress;
};
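
// A few notes on this bookkeeping (comments added for clarity):
// - firstBlockWithPossibleSpace is a hint; allocation scans start there, and
//   sweep() resets it to 0 whenever any cell is reclaimed.
// - numLiveObjectsAtLastCollect drives the collection heuristic in
//   heapAllocate(): collect once the objects allocated since the last
//   collection (plus extraCost) are numerous both in absolute terms and
//   relative to the surviving population.
// - extraCost accumulates out-of-heap memory reported via recordExtraCost()
//   and is zeroed by each sweep.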

static CollectorHeap primaryHeap = { 0, 0, 0, 0, 0, 0, 0, NoOperation };
static CollectorHeap numberHeap = { 0, 0, 0, 0, 0, 0, 0, NoOperation };

// FIXME: I don't think this needs to be a static data member of the Collector class.
// Just a private global like "heap" above would be fine.
size_t Collector::mainThreadOnlyObjectCount = 0;

static CollectorBlock* allocateBlock()
{
#if PLATFORM(DARWIN)
    vm_address_t address = 0;
    vm_map(current_task(), &address, BLOCK_SIZE, BLOCK_OFFSET_MASK, VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
#elif PLATFORM(WIN_OS)
    // Windows virtual address granularity is naturally 64k
    LPVOID address = VirtualAlloc(NULL, BLOCK_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#elif HAVE(POSIX_MEMALIGN)
    void* address;
    posix_memalign(&address, BLOCK_SIZE, BLOCK_SIZE);
    memset(address, 0, BLOCK_SIZE);
#else
    static size_t pagesize = getpagesize();

    size_t extra = 0;
    if (BLOCK_SIZE > pagesize)
        extra = BLOCK_SIZE - pagesize;

    void* mmapResult = mmap(NULL, BLOCK_SIZE + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
    uintptr_t address = reinterpret_cast<uintptr_t>(mmapResult);

    size_t adjust = 0;
    if ((address & BLOCK_OFFSET_MASK) != 0)
        adjust = BLOCK_SIZE - (address & BLOCK_OFFSET_MASK);

    if (adjust > 0)
        munmap(reinterpret_cast<void*>(address), adjust);

    if (adjust < extra)
        munmap(reinterpret_cast<void*>(address + adjust + BLOCK_SIZE), extra - adjust);

    address += adjust;
    memset(reinterpret_cast<void*>(address), 0, BLOCK_SIZE);
#endif

    return reinterpret_cast<CollectorBlock*>(address);
}

static void freeBlock(CollectorBlock* block)
{
#if PLATFORM(DARWIN)
    vm_deallocate(current_task(), reinterpret_cast<vm_address_t>(block), BLOCK_SIZE);
#elif PLATFORM(WIN_OS)
    VirtualFree(block, BLOCK_SIZE, MEM_RELEASE);
#elif HAVE(POSIX_MEMALIGN)
    free(block);
#else
    munmap(block, BLOCK_SIZE);
#endif
}
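
// Every path in allocateBlock() yields a block whose address is a multiple of
// BLOCK_SIZE (the mmap fallback over-allocates and trims to get there). That
// alignment is what makes conservative scanning cheap: any candidate pointer
// can be mapped back to its block header with a mask, exactly as
// markStackObjectsConservatively() does below. A minimal sketch:
//
//     uintptr_t bits = reinterpret_cast<uintptr_t>(candidate);
//     CollectorBlock* block = reinterpret_cast<CollectorBlock*>(bits - (bits & BLOCK_OFFSET_MASK));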

void Collector::recordExtraCost(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point in
    // collecting more frequently as long as it stays alive.
    // NOTE: we target the primaryHeap unconditionally as JSNumber doesn't modify cost

    primaryHeap.extraCost += cost;
}
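
// Example (hypothetical caller): an object wrapping a large out-of-heap buffer
// would report that memory so the next collection is scheduled sooner:
//
//     void* buffer = fastMalloc(1024 * 1024);
//     Collector::recordExtraCost(1024 * 1024);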

template <Collector::HeapType heapType> struct HeapConstants;

template <> struct HeapConstants<Collector::PrimaryHeap> {
    static const size_t cellSize = CELL_SIZE;
    static const size_t cellsPerBlock = CELLS_PER_BLOCK;
    static const size_t bitmapShift = 0;
    typedef CollectorCell Cell;
    typedef CollectorBlock Block;
};

template <> struct HeapConstants<Collector::NumberHeap> {
    static const size_t cellSize = SMALL_CELL_SIZE;
    static const size_t cellsPerBlock = SMALL_CELLS_PER_BLOCK;
    static const size_t bitmapShift = 1;
    typedef SmallCollectorCell Cell;
    typedef SmallCellCollectorBlock Block;
};
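
// The specializations above let heapAllocate() and sweep() be instantiated
// once per heap. Number cells are half-size, so the number heap packs more
// cells per block; bitmapShift = 1 means a pair of adjacent small cells
// shares one mark bit (sweep() tests marked.get(i >> bitmapShift)). Sharing
// a bit is conservative (marking either cell of a pair appears to keep both),
// which seems acceptable here because number cells need no destructor work
// on sweep.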

template <Collector::HeapType heapType> void* Collector::heapAllocate(size_t s)
{
    typedef typename HeapConstants<heapType>::Block Block;
    typedef typename HeapConstants<heapType>::Cell Cell;

    CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;
    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());
    ASSERT(s <= HeapConstants<heapType>::cellSize);
    UNUSED_PARAM(s); // s is now only used for the above assert

    ASSERT(heap.operationInProgress == NoOperation);
    ASSERT(heapType == PrimaryHeap || heap.extraCost == 0);
    // FIXME: If another global variable access here doesn't hurt performance
    // too much, we could abort() in NDEBUG builds, which could help ensure we
    // don't spend any time debugging cases where we allocate inside an object's
    // deallocation code.

    size_t numLiveObjects = heap.numLiveObjects;
    size_t usedBlocks = heap.usedBlocks;
    size_t i = heap.firstBlockWithPossibleSpace;

    // if we have a huge amount of extra cost, we'll try to collect even if we still have
    // free cells left.
    if (heapType == PrimaryHeap && heap.extraCost > ALLOCATIONS_PER_COLLECTION) {
        size_t numLiveObjectsAtLastCollect = heap.numLiveObjectsAtLastCollect;
        size_t numNewObjects = numLiveObjects - numLiveObjectsAtLastCollect;
        const size_t newCost = numNewObjects + heap.extraCost;
        if (newCost >= ALLOCATIONS_PER_COLLECTION && newCost >= numLiveObjectsAtLastCollect)
            goto collect;
    }

    ASSERT(heap.operationInProgress == NoOperation);
#ifndef NDEBUG
    // FIXME: Consider doing this in NDEBUG builds too (see comment above).
    heap.operationInProgress = Allocation;
#endif

scan:
    Block* targetBlock;
    size_t targetBlockUsedCells;
    if (i != usedBlocks) {
        targetBlock = (Block*)heap.blocks[i];
        targetBlockUsedCells = targetBlock->usedCells;
        ASSERT(targetBlockUsedCells <= HeapConstants<heapType>::cellsPerBlock);
        while (targetBlockUsedCells == HeapConstants<heapType>::cellsPerBlock) {
            if (++i == usedBlocks)
                goto collect;
            targetBlock = (Block*)heap.blocks[i];
            targetBlockUsedCells = targetBlock->usedCells;
            ASSERT(targetBlockUsedCells <= HeapConstants<heapType>::cellsPerBlock);
        }
        heap.firstBlockWithPossibleSpace = i;
    } else {

collect:
        size_t numLiveObjectsAtLastCollect = heap.numLiveObjectsAtLastCollect;
        size_t numNewObjects = numLiveObjects - numLiveObjectsAtLastCollect;
        const size_t newCost = numNewObjects + heap.extraCost;

        if (newCost >= ALLOCATIONS_PER_COLLECTION && newCost >= numLiveObjectsAtLastCollect) {
#ifndef NDEBUG
            heap.operationInProgress = NoOperation;
#endif
            bool collected = collect();
#ifndef NDEBUG
            heap.operationInProgress = Allocation;
#endif
            if (collected) {
                numLiveObjects = heap.numLiveObjects;
                usedBlocks = heap.usedBlocks;
                i = heap.firstBlockWithPossibleSpace;
                goto scan;
            }
        }

        // didn't find a block with free space, and GC didn't reclaim anything; we need to allocate a new block
        size_t numBlocks = heap.numBlocks;
        if (usedBlocks == numBlocks) {
            numBlocks = max(MIN_ARRAY_SIZE, numBlocks * GROWTH_FACTOR);
            heap.numBlocks = numBlocks;
            heap.blocks = static_cast<CollectorBlock**>(fastRealloc(heap.blocks, numBlocks * sizeof(CollectorBlock*)));
        }

        targetBlock = (Block*)allocateBlock();
        targetBlock->freeList = targetBlock->cells;
        targetBlockUsedCells = 0;
        heap.blocks[usedBlocks] = (CollectorBlock*)targetBlock;
        heap.usedBlocks = usedBlocks + 1;
        heap.firstBlockWithPossibleSpace = usedBlocks;
    }

    // find a free spot in the block and detach it from the free list
    Cell* newCell = targetBlock->freeList;

    // "next" field is a cell offset -- 0 means next cell, so a zeroed block is already initialized
    targetBlock->freeList = (newCell + 1) + newCell->u.freeCell.next;
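
    // Worked example of the encoding: in a freshly zeroed block, freeList
    // points at cells[0] and every next field is 0, so the line above advances
    // the free list to (cells[0] + 1) + 0 == &cells[1]. When sweep() frees a
    // cell it stores freeList - (cell + 1), so the decode here lands back on
    // the old free-list head regardless of where it lives in the block.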

    targetBlock->usedCells = static_cast<uint32_t>(targetBlockUsedCells + 1);
    heap.numLiveObjects = numLiveObjects + 1;

#ifndef NDEBUG
    // FIXME: Consider doing this in NDEBUG builds too (see comment above).
    heap.operationInProgress = NoOperation;
#endif

    return newCell;
}

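// The public entry points below just instantiate the template for each heap.
// Numbers get a segregated heap so the sweep can skip virtual destructor
// calls entirely (see the heapType != Collector::NumberHeap checks in sweep()).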
void* Collector::allocate(size_t s)
{
    return heapAllocate<PrimaryHeap>(s);
}

void* Collector::allocateNumber(size_t s)
{
    return heapAllocate<NumberHeap>(s);
}

static inline void* currentThreadStackBase()
{
#if PLATFORM(DARWIN)
    pthread_t thread = pthread_self();
    return pthread_get_stackaddr_np(thread);
#elif PLATFORM(WIN_OS) && PLATFORM(X86) && COMPILER(MSVC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    NT_TIB* pTib;
    __asm {
        MOV EAX, FS:[18h]
        MOV pTib, EAX
    }
    return (void*)pTib->StackBase;
#elif PLATFORM(WIN_OS) && PLATFORM(X86_64) && COMPILER(MSVC)
    PNT_TIB64 pTib = reinterpret_cast<PNT_TIB64>(NtCurrentTeb());
    return (void*)pTib->StackBase;
#elif PLATFORM(WIN_OS) && PLATFORM(X86) && COMPILER(GCC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    NT_TIB* pTib;
    asm ( "movl %%fs:0x18, %0\n"
          : "=r" (pTib)
        );
    return (void*)pTib->StackBase;
#elif PLATFORM(SOLARIS)
    stack_t s;
    thr_stksegment(&s);
    return s.ss_sp;
#elif PLATFORM(UNIX)
    static void* stackBase = 0;
    static size_t stackSize = 0;
    static pthread_t stackThread;
    pthread_t thread = pthread_self();
    if (stackBase == 0 || thread != stackThread) {
        pthread_attr_t sattr;
        pthread_attr_init(&sattr);
#if HAVE(PTHREAD_NP_H)
        // e.g. on FreeBSD 5.4, neundorf@kde.org
        pthread_attr_get_np(thread, &sattr);
#else
        // FIXME: this function is non-portable; other POSIX systems may have different np alternatives
        pthread_getattr_np(thread, &sattr);
#endif
        int rc = pthread_attr_getstack(&sattr, &stackBase, &stackSize);
        (void)rc; // FIXME: Deal with error code somehow? Seems fatal.
        ASSERT(stackBase);
        pthread_attr_destroy(&sattr);
        stackThread = thread;
    }
    return static_cast<char*>(stackBase) + stackSize;
#else
#error Need a way to get the stack base on this platform
#endif
}

#if USE(MULTIPLE_THREADS)
static pthread_t mainThread;
#endif

void Collector::registerAsMainThread()
{
#if USE(MULTIPLE_THREADS)
    mainThread = pthread_self();
#endif
}

static inline bool onMainThread()
{
#if USE(MULTIPLE_THREADS)
#if PLATFORM(DARWIN)
    pthread_t javaScriptCollectionThread = JSJavaScriptCollectionThread();
    return javaScriptCollectionThread == 0 || javaScriptCollectionThread == pthread_self();
#else
    return !!pthread_equal(pthread_self(), mainThread);
#endif
#else
    return true;
#endif
}

#if USE(MULTIPLE_THREADS)

#if PLATFORM(DARWIN)
typedef mach_port_t PlatformThread;
#elif PLATFORM(WIN_OS)
struct PlatformThread {
    PlatformThread(DWORD _id, HANDLE _handle) : id(_id), handle(_handle) {}
    DWORD id;
    HANDLE handle;
};
#endif

static inline PlatformThread getCurrentPlatformThread()
{
#if PLATFORM(DARWIN)
    return pthread_mach_thread_np(pthread_self());
#elif PLATFORM(WIN_OS)
    HANDLE threadHandle = pthread_getw32threadhandle_np(pthread_self());
    return PlatformThread(GetCurrentThreadId(), threadHandle);
#endif
}

class Collector::Thread {
public:
    Thread(pthread_t pthread, const PlatformThread& platThread, void* base)
        : posixThread(pthread), platformThread(platThread), stackBase(base) {}
    Thread* next;
    pthread_t posixThread;
    PlatformThread platformThread;
    void* stackBase;
};

pthread_key_t registeredThreadKey;
pthread_once_t registeredThreadKeyOnce = PTHREAD_ONCE_INIT;
Collector::Thread* registeredThreads;

static void destroyRegisteredThread(void* data)
{
    Collector::Thread* thread = (Collector::Thread*)data;

    // Can't use JSLock convenience object here because we don't want to re-register
    // an exiting thread.
    JSLock::lock();

    if (registeredThreads == thread) {
        registeredThreads = registeredThreads->next;
    } else {
        Collector::Thread* last = registeredThreads;
        Collector::Thread* t;
        for (t = registeredThreads->next; t != NULL; t = t->next) {
            if (t == thread) {
                last->next = t->next;
                break;
            }
            last = t;
        }
        ASSERT(t); // If t is NULL, we never found ourselves in the list.
    }

    JSLock::unlock();

    delete thread;
}

static void initializeRegisteredThreadKey()
{
    pthread_key_create(&registeredThreadKey, destroyRegisteredThread);
}

void Collector::registerThread()
{
    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());

    pthread_once(&registeredThreadKeyOnce, initializeRegisteredThreadKey);

    if (!pthread_getspecific(registeredThreadKey)) {
#if PLATFORM(DARWIN)
        if (onMainThread())
            CollectorHeapIntrospector::init(&primaryHeap, &numberHeap);
#endif

        Collector::Thread* thread = new Collector::Thread(pthread_self(), getCurrentPlatformThread(), currentThreadStackBase());

        thread->next = registeredThreads;
        registeredThreads = thread;
        pthread_setspecific(registeredThreadKey, thread);
    }
}

#endif

#define IS_POINTER_ALIGNED(p) (((intptr_t)(p) & (sizeof(char *) - 1)) == 0)

// cell size needs to be a power of two for this to be valid
#define IS_HALF_CELL_ALIGNED(p) (((intptr_t)(p) & (CELL_MASK >> 1)) == 0)

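// Notes on the conservative scan below: every pointer-aligned word in
// [start, end) is treated as a potential cell pointer and validated by block
// membership plus cell offset. Half-cell alignment (rather than full cell
// alignment) is checked so that pointers to the number heap's half-size cells
// also pass the filter. The single & in the block-matching conditions looks
// deliberate: both operands are cheap to evaluate, and avoiding && keeps
// branches out of a very hot loop.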
void Collector::markStackObjectsConservatively(void* start, void* end)
{
    if (start > end) {
        void* tmp = start;
        start = end;
        end = tmp;
    }

    ASSERT(((char*)end - (char*)start) < 0x1000000);
    ASSERT(IS_POINTER_ALIGNED(start));
    ASSERT(IS_POINTER_ALIGNED(end));

    char** p = (char**)start;
    char** e = (char**)end;

    size_t usedPrimaryBlocks = primaryHeap.usedBlocks;
    size_t usedNumberBlocks = numberHeap.usedBlocks;
    CollectorBlock** primaryBlocks = primaryHeap.blocks;
    CollectorBlock** numberBlocks = numberHeap.blocks;

    const size_t lastCellOffset = sizeof(CollectorCell) * (CELLS_PER_BLOCK - 1);

    while (p != e) {
        char* x = *p++;
        if (IS_HALF_CELL_ALIGNED(x) && x) {
            uintptr_t xAsBits = reinterpret_cast<uintptr_t>(x);
            xAsBits &= CELL_ALIGN_MASK;
            uintptr_t offset = xAsBits & BLOCK_OFFSET_MASK;
            CollectorBlock* blockAddr = reinterpret_cast<CollectorBlock*>(xAsBits - offset);
            // Mark the number heap; we can mark these cells directly to avoid the virtual call cost
            for (size_t block = 0; block < usedNumberBlocks; block++) {
                if ((numberBlocks[block] == blockAddr) & (offset <= lastCellOffset)) {
                    Collector::markCell(reinterpret_cast<JSCell*>(xAsBits));
                    goto endMarkLoop;
                }
            }

            // Mark the primary heap
            for (size_t block = 0; block < usedPrimaryBlocks; block++) {
                if ((primaryBlocks[block] == blockAddr) & (offset <= lastCellOffset)) {
                    if (((CollectorCell*)xAsBits)->u.freeCell.zeroIfFree != 0) {
                        JSCell* imp = reinterpret_cast<JSCell*>(xAsBits);
                        if (!imp->marked())
                            imp->mark();
                    }
                    break;
                }
            }
        endMarkLoop:
            ;
        }
    }
}

void Collector::markCurrentThreadConservatively()
{
    // setjmp forces volatile registers onto the stack
    jmp_buf registers;
#if COMPILER(MSVC)
#pragma warning(push)
#pragma warning(disable: 4611)
#endif
    setjmp(registers);
#if COMPILER(MSVC)
#pragma warning(pop)
#endif

    void* dummy;
    void* stackPointer = &dummy;
    void* stackBase = currentThreadStackBase();

    markStackObjectsConservatively(stackPointer, stackBase);
}
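
// How the ranges above work: the stack grows downward on the platforms
// supported here, so currentThreadStackBase() returns the high end of the
// stack and &dummy approximates the current stack pointer; scanning
// [stackPointer, stackBase) therefore covers every live frame. The setjmp()
// call matters because jmp_buf lives in that range: values held only in
// callee-saved registers get spilled into it and are found by the scan.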

#if USE(MULTIPLE_THREADS)

static inline void suspendThread(const PlatformThread& platformThread)
{
#if PLATFORM(DARWIN)
    thread_suspend(platformThread);
#elif PLATFORM(WIN_OS)
    SuspendThread(platformThread.handle);
#else
#error Need a way to suspend threads on this platform
#endif
}

static inline void resumeThread(const PlatformThread& platformThread)
{
#if PLATFORM(DARWIN)
    thread_resume(platformThread);
#elif PLATFORM(WIN_OS)
    ResumeThread(platformThread.handle);
#else
#error Need a way to resume threads on this platform
#endif
}

typedef unsigned long usword_t; // word size, assumed to be either 32 or 64 bit

#if PLATFORM(DARWIN)

#if PLATFORM(X86)
typedef i386_thread_state_t PlatformThreadRegisters;
#elif PLATFORM(X86_64)
typedef x86_thread_state64_t PlatformThreadRegisters;
#elif PLATFORM(PPC)
typedef ppc_thread_state_t PlatformThreadRegisters;
#elif PLATFORM(PPC64)
typedef ppc_thread_state64_t PlatformThreadRegisters;
#elif PLATFORM(ARM)
typedef arm_thread_state_t PlatformThreadRegisters;
#else
#error Unknown Architecture
#endif

#elif PLATFORM(WIN_OS) && PLATFORM(X86)
typedef CONTEXT PlatformThreadRegisters;
#else
#error Need a thread register struct for this platform
#endif

size_t getPlatformThreadRegisters(const PlatformThread& platformThread, PlatformThreadRegisters& regs)
{
#if PLATFORM(DARWIN)

#if PLATFORM(X86)
    unsigned user_count = sizeof(regs)/sizeof(int);
    thread_state_flavor_t flavor = i386_THREAD_STATE;
#elif PLATFORM(X86_64)
    unsigned user_count = x86_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = x86_THREAD_STATE64;
#elif PLATFORM(PPC)
    unsigned user_count = PPC_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE;
#elif PLATFORM(PPC64)
    unsigned user_count = PPC_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE64;
#elif PLATFORM(ARM)
    unsigned user_count = ARM_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = ARM_THREAD_STATE;
#else
#error Unknown Architecture
#endif

    kern_return_t result = thread_get_state(platformThread, flavor, (thread_state_t)&regs, &user_count);
    if (result != KERN_SUCCESS) {
        WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION,
                            "JavaScript garbage collection failed because thread_get_state returned an error (%d). This is probably the result of running inside Rosetta, which is not supported.", result);
        CRASH();
    }
    return user_count * sizeof(usword_t);
// end PLATFORM(DARWIN)

#elif PLATFORM(WIN_OS) && PLATFORM(X86)
    regs.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL | CONTEXT_SEGMENTS;
    GetThreadContext(platformThread.handle, &regs);
    return sizeof(CONTEXT);
#else
#error Need a way to get thread registers on this platform
#endif
}

static inline void* otherThreadStackPointer(const PlatformThreadRegisters& regs)
{
#if PLATFORM(DARWIN)

#if __DARWIN_UNIX03

#if PLATFORM(X86)
    return (void*)regs.__esp;
#elif PLATFORM(X86_64)
    return (void*)regs.__rsp;
#elif PLATFORM(PPC) || PLATFORM(PPC64)
    return (void*)regs.__r1;
#elif PLATFORM(ARM)
    return (void*)regs.__sp;
#else
#error Unknown Architecture
#endif

#else // !__DARWIN_UNIX03

#if PLATFORM(X86)
    return (void*)regs.esp;
#elif PLATFORM(X86_64)
    return (void*)regs.rsp;
#elif (PLATFORM(PPC) || PLATFORM(PPC64))
    return (void*)regs.r1;
#else
#error Unknown Architecture
#endif

#endif // __DARWIN_UNIX03

// end PLATFORM(DARWIN)
#elif PLATFORM(X86) && PLATFORM(WIN_OS)
    return (void*)(uintptr_t)regs.Esp;
#else
#error Need a way to get the stack pointer for another thread on this platform
#endif
}

void Collector::markOtherThreadConservatively(Thread* thread)
{
    suspendThread(thread->platformThread);

    PlatformThreadRegisters regs;
    size_t regSize = getPlatformThreadRegisters(thread->platformThread, regs);

    // mark the thread's registers
    markStackObjectsConservatively((void*)&regs, (void*)((char*)&regs + regSize));

    void* stackPointer = otherThreadStackPointer(regs);
    markStackObjectsConservatively(stackPointer, thread->stackBase);

    resumeThread(thread->platformThread);
}

#endif

void Collector::markStackObjectsConservatively()
{
    markCurrentThreadConservatively();

#if USE(MULTIPLE_THREADS)
    for (Thread* thread = registeredThreads; thread != NULL; thread = thread->next) {
        if (!pthread_equal(thread->posixThread, pthread_self())) {
            markOtherThreadConservatively(thread);
        }
    }
#endif
}

typedef HashCountedSet<JSCell*> ProtectCountSet;

static ProtectCountSet& protectedValues()
{
    static ProtectCountSet staticProtectCountSet;
    return staticProtectCountSet;
}

void Collector::protect(JSValue* k)
{
    ASSERT(k);
    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());

    if (JSImmediate::isImmediate(k))
        return;

    protectedValues().add(k->asCell());
}

void Collector::unprotect(JSValue* k)
{
    ASSERT(k);
    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());

    if (JSImmediate::isImmediate(k))
        return;

    protectedValues().remove(k->asCell());
}
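
// Because ProtectCountSet is a HashCountedSet, protection nests: each
// protect() bumps a per-cell count and each unprotect() drops it, so a value
// stays rooted until the counts balance. A hypothetical caller pattern:
//
//     JSLock lock;
//     Collector::protect(value);   // value now survives collections
//     // ... use value from native code ...
//     Collector::unprotect(value); // balanced; value is collectable again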

void Collector::collectOnMainThreadOnly(JSValue* value)
{
    ASSERT(value);
    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());

    if (JSImmediate::isImmediate(value))
        return;

    JSCell* cell = value->asCell();
    cellBlock(cell)->collectOnMainThreadOnly.set(cellOffset(cell));
    ++mainThreadOnlyObjectCount;
}

void Collector::markProtectedObjects()
{
    ProtectCountSet& protectedValues = KJS::protectedValues();
    ProtectCountSet::iterator end = protectedValues.end();
    for (ProtectCountSet::iterator it = protectedValues.begin(); it != end; ++it) {
        JSCell* val = it->first;
        if (!val->marked())
            val->mark();
    }
}

void Collector::markMainThreadOnlyObjects()
{
#if USE(MULTIPLE_THREADS)
    ASSERT(!onMainThread());
#endif

    // Optimization for clients that never register "main thread only" objects.
    if (!mainThreadOnlyObjectCount)
        return;

    // FIXME: We can optimize this marking algorithm by keeping an exact set of
    // "main thread only" objects when the "main thread only" object count is
    // small. We don't want to keep an exact set all the time, because WebCore
    // tends to create lots of "main thread only" objects, and all that set
    // thrashing can be expensive.

    size_t count = 0;

    // We don't look at the numberHeap as primitive values can never be marked as main thread only
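    // (Added note: usedCells counts only live cells, so when the scan below
    // hits a free cell it bumps minimumCellsToProcess by one; that guarantees
    // the loop visits at least usedCells live cells without walking the whole
    // block when it is mostly empty. sweep() uses the same trick.)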
    for (size_t block = 0; block < primaryHeap.usedBlocks; block++) {
        ASSERT(count < mainThreadOnlyObjectCount);

        CollectorBlock* curBlock = primaryHeap.blocks[block];
        size_t minimumCellsToProcess = curBlock->usedCells;
        for (size_t i = 0; (i < minimumCellsToProcess) & (i < CELLS_PER_BLOCK); i++) {
            CollectorCell* cell = curBlock->cells + i;
            if (cell->u.freeCell.zeroIfFree == 0)
                ++minimumCellsToProcess;
            else {
                if (curBlock->collectOnMainThreadOnly.get(i)) {
                    if (!curBlock->marked.get(i)) {
                        JSCell* imp = reinterpret_cast<JSCell*>(cell);
                        imp->mark();
                    }
                    if (++count == mainThreadOnlyObjectCount)
                        return;
                }
            }
        }
    }
}

template <Collector::HeapType heapType> size_t Collector::sweep(bool currentThreadIsMainThread)
{
    typedef typename HeapConstants<heapType>::Block Block;
    typedef typename HeapConstants<heapType>::Cell Cell;

    UNUSED_PARAM(currentThreadIsMainThread); // currentThreadIsMainThread is only used in ASSERTs
    // SWEEP: delete everything that is unmarked (garbage) and unmark everything else
    CollectorHeap& heap = heapType == Collector::PrimaryHeap ? primaryHeap : numberHeap;

    size_t emptyBlocks = 0;
    size_t numLiveObjects = heap.numLiveObjects;

    for (size_t block = 0; block < heap.usedBlocks; block++) {
        Block* curBlock = (Block*)heap.blocks[block];

        size_t usedCells = curBlock->usedCells;
        Cell* freeList = curBlock->freeList;

        if (usedCells == HeapConstants<heapType>::cellsPerBlock) {
            // special case with a block where all cells are used -- testing indicates this happens often
            for (size_t i = 0; i < HeapConstants<heapType>::cellsPerBlock; i++) {
                if (!curBlock->marked.get(i >> HeapConstants<heapType>::bitmapShift)) {
                    Cell* cell = curBlock->cells + i;

                    if (heapType != Collector::NumberHeap) {
                        JSCell* imp = reinterpret_cast<JSCell*>(cell);
                        // special case for allocated but uninitialized object
                        // (We don't need this check earlier because nothing prior to this point
                        // assumes the object has a valid vptr.)
                        if (cell->u.freeCell.zeroIfFree == 0)
                            continue;

                        ASSERT(currentThreadIsMainThread || !curBlock->collectOnMainThreadOnly.get(i));
                        if (curBlock->collectOnMainThreadOnly.get(i)) {
                            curBlock->collectOnMainThreadOnly.clear(i);
                            --Collector::mainThreadOnlyObjectCount;
                        }
                        imp->~JSCell();
                    }

                    --usedCells;
                    --numLiveObjects;

                    // put cell on the free list
                    cell->u.freeCell.zeroIfFree = 0;
                    cell->u.freeCell.next = freeList - (cell + 1);
                    freeList = cell;
                }
            }
        } else {
            size_t minimumCellsToProcess = usedCells;
            for (size_t i = 0; (i < minimumCellsToProcess) & (i < HeapConstants<heapType>::cellsPerBlock); i++) {
                Cell* cell = curBlock->cells + i;
                if (cell->u.freeCell.zeroIfFree == 0) {
                    ++minimumCellsToProcess;
                } else {
                    if (!curBlock->marked.get(i >> HeapConstants<heapType>::bitmapShift)) {
                        if (heapType != Collector::NumberHeap) {
                            JSCell* imp = reinterpret_cast<JSCell*>(cell);
                            ASSERT(currentThreadIsMainThread || !curBlock->collectOnMainThreadOnly.get(i));
                            if (curBlock->collectOnMainThreadOnly.get(i)) {
                                curBlock->collectOnMainThreadOnly.clear(i);
                                --Collector::mainThreadOnlyObjectCount;
                            }
                            imp->~JSCell();
                        }
                        --usedCells;
                        --numLiveObjects;

                        // put cell on the free list
                        cell->u.freeCell.zeroIfFree = 0;
                        cell->u.freeCell.next = freeList - (cell + 1);
                        freeList = cell;
                    }
                }
            }
        }

        curBlock->usedCells = static_cast<uint32_t>(usedCells);
        curBlock->freeList = freeList;
        curBlock->marked.clearAll();

        if (usedCells == 0) {
            emptyBlocks++;
            if (emptyBlocks > SPARE_EMPTY_BLOCKS) {
#if !DEBUG_COLLECTOR
                freeBlock((CollectorBlock*)curBlock);
#endif
                // swap with the last block so we compact as we go
                heap.blocks[block] = heap.blocks[heap.usedBlocks - 1];
                heap.usedBlocks--;
                block--; // Don't move forward a step in this case

                if (heap.numBlocks > MIN_ARRAY_SIZE && heap.usedBlocks < heap.numBlocks / LOW_WATER_FACTOR) {
                    heap.numBlocks = heap.numBlocks / GROWTH_FACTOR;
                    heap.blocks = (CollectorBlock**)fastRealloc(heap.blocks, heap.numBlocks * sizeof(CollectorBlock*));
                }
            }
        }
    }

    if (heap.numLiveObjects != numLiveObjects)
        heap.firstBlockWithPossibleSpace = 0;

    heap.numLiveObjects = numLiveObjects;
    heap.numLiveObjectsAtLastCollect = numLiveObjects;
    heap.extraCost = 0;
    return numLiveObjects;
}

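// collect() returns true only if the sweep reclaimed at least one object;
// heapAllocate() uses that to decide between rescanning the existing blocks
// and appending a fresh one.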
bool Collector::collect()
{
    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());

    ASSERT((primaryHeap.operationInProgress == NoOperation) | (numberHeap.operationInProgress == NoOperation));
    if ((primaryHeap.operationInProgress != NoOperation) | (numberHeap.operationInProgress != NoOperation))
        abort();

    primaryHeap.operationInProgress = Collection;
    numberHeap.operationInProgress = Collection;

    bool currentThreadIsMainThread = onMainThread();

    // MARK: first mark all referenced objects recursively starting out from the set of root objects

#ifndef NDEBUG
    // Forbid malloc during the mark phase. Marking a thread suspends it, so
    // a malloc inside mark() would risk a deadlock with a thread that had been
    // suspended while holding the malloc lock.
    fastMallocForbid();
#endif

    markStackObjectsConservatively();
    markProtectedObjects();
    ExecState::markActiveExecStates();
    List::markProtectedLists();
#if USE(MULTIPLE_THREADS)
    if (!currentThreadIsMainThread)
        markMainThreadOnlyObjects();
#endif

#ifndef NDEBUG
    fastMallocAllow();
#endif

    size_t originalLiveObjects = primaryHeap.numLiveObjects + numberHeap.numLiveObjects;
    size_t numLiveObjects = sweep<PrimaryHeap>(currentThreadIsMainThread);
    numLiveObjects += sweep<NumberHeap>(currentThreadIsMainThread);

    primaryHeap.operationInProgress = NoOperation;
    numberHeap.operationInProgress = NoOperation;

    return numLiveObjects < originalLiveObjects;
}

size_t Collector::size()
{
    return primaryHeap.numLiveObjects + numberHeap.numLiveObjects;
}

size_t Collector::globalObjectCount()
{
    size_t count = 0;
    if (JSGlobalObject::head()) {
        JSGlobalObject* o = JSGlobalObject::head();
        do {
            ++count;
            o = o->next();
        } while (o != JSGlobalObject::head());
    }
    return count;
}

size_t Collector::protectedGlobalObjectCount()
{
    size_t count = 0;
    if (JSGlobalObject::head()) {
        JSGlobalObject* o = JSGlobalObject::head();
        do {
            if (protectedValues().contains(o))
                ++count;
            o = o->next();
        } while (o != JSGlobalObject::head());
    }
    return count;
}

size_t Collector::protectedObjectCount()
{
    return protectedValues().size();
}

static const char* typeName(JSCell* val)
{
    const char* name = "???";
    switch (val->type()) {
        case UnspecifiedType:
            break;
        case UndefinedType:
            name = "undefined";
            break;
        case NullType:
            name = "null";
            break;
        case BooleanType:
            name = "boolean";
            break;
        case StringType:
            name = "string";
            break;
        case NumberType:
            name = "number";
            break;
        case ObjectType: {
            const ClassInfo* info = static_cast<JSObject*>(val)->classInfo();
            name = info ? info->className : "Object";
            break;
        }
        case GetterSetterType:
            name = "gettersetter";
            break;
    }
    return name;
}

HashCountedSet<const char*>* Collector::protectedObjectTypeCounts()
{
    HashCountedSet<const char*>* counts = new HashCountedSet<const char*>;

    ProtectCountSet& protectedValues = KJS::protectedValues();
    ProtectCountSet::iterator end = protectedValues.end();
    for (ProtectCountSet::iterator it = protectedValues.begin(); it != end; ++it)
        counts->add(typeName(it->first));

    return counts;
}

bool Collector::isBusy()
{
    return (primaryHeap.operationInProgress != NoOperation) | (numberHeap.operationInProgress != NoOperation);
}

void Collector::reportOutOfMemoryToAllExecStates()
{
    ExecStateStack::const_iterator end = ExecState::activeExecStates().end();
    for (ExecStateStack::const_iterator it = ExecState::activeExecStates().begin(); it != end; ++it) {
        (*it)->setException(Error::create(*it, GeneralError, "Out of memory"));
    }
}

} // namespace KJS