/*
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include "config.h"
#include "Collector.h"

#include "ArgList.h"
#include "CallFrame.h"
#include "CodeBlock.h"
#include "CollectorHeapIterator.h"
#include "Interpreter.h"
#include "JSArray.h"
#include "JSGlobalObject.h"
#include "JSLock.h"
#include "JSONObject.h"
#include "JSString.h"
#include "JSValue.h"
#include "JSZombie.h"
#include "MarkStack.h"
#include "Nodes.h"
#include "Tracing.h"
#include <algorithm>
#include <limits.h>
#include <setjmp.h>
#include <stdlib.h>
#include <wtf/FastMalloc.h>
#include <wtf/HashCountedSet.h>
#include <wtf/UnusedParam.h>
#include <wtf/VMTags.h>

#if OS(DARWIN)

#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <mach/vm_map.h>

#elif OS(WINDOWS)

#include <windows.h>
#include <malloc.h>

#elif OS(HAIKU)

#include <OS.h>

#elif OS(UNIX)

#include <stdlib.h>
#if !OS(HAIKU)
#include <sys/mman.h>
#endif
#include <unistd.h>

#if OS(SOLARIS)
#include <thread.h>
#else
#include <pthread.h>
#endif

#if HAVE(PTHREAD_NP_H)
#include <pthread_np.h>
#endif

#if OS(QNX)
#include <fcntl.h>
#include <sys/procfs.h>
#include <stdio.h>
#include <errno.h>
#endif

#endif

#define COLLECT_ON_EVERY_ALLOCATION 0

using std::max;

namespace JSC {

// tunable parameters
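// GROWTH_FACTOR scales the block-pointer array up in allocateBlock() and back
// down in freeBlock(); LOW_WATER_FACTOR sets the shrink threshold (fewer than
// 1/4 of array slots in use); ALLOCATIONS_PER_COLLECTION is the minimum
// number of free cells resizeBlocks() keeps available after a collection.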

const size_t GROWTH_FACTOR = 2;
const size_t LOW_WATER_FACTOR = 4;
const size_t ALLOCATIONS_PER_COLLECTION = 3600;
// This value has to be a macro to be used in max() without introducing
// a PIC branch in Mach-O binaries, see <rdar://problem/5971391>.
#define MIN_ARRAY_SIZE (static_cast<size_t>(14))

#if ENABLE(JSC_MULTIPLE_THREADS)

#if OS(DARWIN)
typedef mach_port_t PlatformThread;
#elif OS(WINDOWS)
typedef HANDLE PlatformThread;
#endif

class Heap::Thread {
public:
    Thread(pthread_t pthread, const PlatformThread& platThread, void* base)
        : posixThread(pthread)
        , platformThread(platThread)
        , stackBase(base)
    {
    }

    Thread* next;
    pthread_t posixThread;
    PlatformThread platformThread;
    void* stackBase;
};

#endif

Heap::Heap(JSGlobalData* globalData)
    : m_markListSet(0)
#if ENABLE(JSC_MULTIPLE_THREADS)
    , m_registeredThreads(0)
    , m_currentThreadRegistrar(0)
#endif
#if OS(SYMBIAN)
    , m_blockallocator(JSCCOLLECTOR_VIRTUALMEM_RESERVATION, BLOCK_SIZE)
#endif
    , m_globalData(globalData)
{
    ASSERT(globalData);
    memset(&m_heap, 0, sizeof(CollectorHeap));
    allocateBlock();
}

Heap::~Heap()
{
    // The destroy function must already have been called, so assert this.
    ASSERT(!m_globalData);
}

void Heap::destroy()
{
    JSLock lock(SilenceAssertionsOnly);

    if (!m_globalData)
        return;

    ASSERT(!m_globalData->dynamicGlobalObject);
    ASSERT(!isBusy());

    // The global object is not GC protected at this point, so sweeping may delete it
    // (and thus the global data) before other objects that may use the global data.
    RefPtr<JSGlobalData> protect(m_globalData);

    delete m_markListSet;
    m_markListSet = 0;

    freeBlocks();

#if ENABLE(JSC_MULTIPLE_THREADS)
    if (m_currentThreadRegistrar) {
        int error = pthread_key_delete(m_currentThreadRegistrar);
        ASSERT_UNUSED(error, !error);
    }

    MutexLocker registeredThreadsLock(m_registeredThreadsMutex);
    for (Heap::Thread* t = m_registeredThreads; t;) {
        Heap::Thread* next = t->next;
        delete t;
        t = next;
    }
#endif
#if OS(SYMBIAN)
    m_blockallocator.destroy();
#endif
    m_globalData = 0;
}
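
// Collector blocks are allocated aligned to BLOCK_SIZE so that the block
// owning any given cell can be recovered by masking off the pointer's low
// bits (see markConservatively()); each platform branch below must provide
// that alignment.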
NEVER_INLINE CollectorBlock* Heap::allocateBlock()
{
#if OS(DARWIN)
    vm_address_t address = 0;
    vm_map(current_task(), &address, BLOCK_SIZE, BLOCK_OFFSET_MASK, VM_FLAGS_ANYWHERE | VM_TAG_FOR_COLLECTOR_MEMORY, MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
#elif OS(SYMBIAN)
    void* address = m_blockallocator.alloc();
    if (!address)
        CRASH();
#elif OS(WINCE)
    void* address = VirtualAlloc(NULL, BLOCK_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#elif OS(WINDOWS)
#if COMPILER(MINGW) && !COMPILER(MINGW64)
    void* address = __mingw_aligned_malloc(BLOCK_SIZE, BLOCK_SIZE);
#else
    void* address = _aligned_malloc(BLOCK_SIZE, BLOCK_SIZE);
#endif
    memset(address, 0, BLOCK_SIZE);
#elif HAVE(POSIX_MEMALIGN)
    void* address;
    posix_memalign(&address, BLOCK_SIZE, BLOCK_SIZE);
#else

#if ENABLE(JSC_MULTIPLE_THREADS)
#error Need to initialize pagesize safely.
#endif
    static size_t pagesize = getpagesize();

    size_t extra = 0;
    if (BLOCK_SIZE > pagesize)
        extra = BLOCK_SIZE - pagesize;

    void* mmapResult = mmap(NULL, BLOCK_SIZE + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
    uintptr_t address = reinterpret_cast<uintptr_t>(mmapResult);

    size_t adjust = 0;
    if ((address & BLOCK_OFFSET_MASK) != 0)
        adjust = BLOCK_SIZE - (address & BLOCK_OFFSET_MASK);

    if (adjust > 0)
        munmap(reinterpret_cast<char*>(address), adjust);

    if (adjust < extra)
        munmap(reinterpret_cast<char*>(address + adjust + BLOCK_SIZE), extra - adjust);

    address += adjust;
#endif

    // Initialize block.

    CollectorBlock* block = reinterpret_cast<CollectorBlock*>(address);
    block->heap = this;
    clearMarkBits(block);

    Structure* dummyMarkableCellStructure = m_globalData->dummyMarkableCellStructure.get();
    for (size_t i = 0; i < HeapConstants::cellsPerBlock; ++i)
        new (block->cells + i) JSCell(dummyMarkableCellStructure);

    // Add block to blocks vector.

    size_t numBlocks = m_heap.numBlocks;
    if (m_heap.usedBlocks == numBlocks) {
        static const size_t maxNumBlocks = ULONG_MAX / sizeof(CollectorBlock*) / GROWTH_FACTOR;
        if (numBlocks > maxNumBlocks)
            CRASH();
        numBlocks = max(MIN_ARRAY_SIZE, numBlocks * GROWTH_FACTOR);
        m_heap.numBlocks = numBlocks;
        m_heap.blocks = static_cast<CollectorBlock**>(fastRealloc(m_heap.blocks, numBlocks * sizeof(CollectorBlock*)));
    }
    m_heap.blocks[m_heap.usedBlocks++] = block;

    return block;
}
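
// freeBlock() runs the destructor of every cell still in the block before
// returning the block's memory to the system.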
NEVER_INLINE void Heap::freeBlock(size_t block)
{
    m_heap.didShrink = true;

    ObjectIterator it(m_heap, block);
    ObjectIterator end(m_heap, block + 1);
    for ( ; it != end; ++it)
        (*it)->~JSCell();
    freeBlockPtr(m_heap.blocks[block]);

    // swap with the last block so we compact as we go
    m_heap.blocks[block] = m_heap.blocks[m_heap.usedBlocks - 1];
    m_heap.usedBlocks--;

    if (m_heap.numBlocks > MIN_ARRAY_SIZE && m_heap.usedBlocks < m_heap.numBlocks / LOW_WATER_FACTOR) {
        m_heap.numBlocks = m_heap.numBlocks / GROWTH_FACTOR;
        m_heap.blocks = static_cast<CollectorBlock**>(fastRealloc(m_heap.blocks, m_heap.numBlocks * sizeof(CollectorBlock*)));
    }
}

NEVER_INLINE void Heap::freeBlockPtr(CollectorBlock* block)
{
#if OS(DARWIN)
    vm_deallocate(current_task(), reinterpret_cast<vm_address_t>(block), BLOCK_SIZE);
#elif OS(SYMBIAN)
    m_blockallocator.free(reinterpret_cast<void*>(block));
#elif OS(WINCE)
    VirtualFree(block, 0, MEM_RELEASE);
#elif OS(WINDOWS)
#if COMPILER(MINGW) && !COMPILER(MINGW64)
    __mingw_aligned_free(block);
#else
    _aligned_free(block);
#endif
#elif HAVE(POSIX_MEMALIGN)
    free(block);
#else
    munmap(reinterpret_cast<char*>(block), BLOCK_SIZE);
#endif
}
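
// Tear down the whole heap: protected cells are marked first so the
// dead-object sweep below skips them, then destructed explicitly, and only
// after that are the underlying blocks released.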
void Heap::freeBlocks()
{
    ProtectCountSet protectedValuesCopy = m_protectedValues;

    clearMarkBits();
    ProtectCountSet::iterator protectedValuesEnd = protectedValuesCopy.end();
    for (ProtectCountSet::iterator it = protectedValuesCopy.begin(); it != protectedValuesEnd; ++it)
        markCell(it->first);

    m_heap.nextCell = 0;
    m_heap.nextBlock = 0;
    DeadObjectIterator it(m_heap, m_heap.nextBlock, m_heap.nextCell);
    DeadObjectIterator end(m_heap, m_heap.usedBlocks);
    for ( ; it != end; ++it)
        (*it)->~JSCell();

    ASSERT(!protectedObjectCount());

    protectedValuesEnd = protectedValuesCopy.end();
    for (ProtectCountSet::iterator it = protectedValuesCopy.begin(); it != protectedValuesEnd; ++it)
        it->first->~JSCell();

    for (size_t block = 0; block < m_heap.usedBlocks; ++block)
        freeBlockPtr(m_heap.blocks[block]);

    fastFree(m_heap.blocks);

    memset(&m_heap, 0, sizeof(CollectorHeap));
}

void Heap::recordExtraCost(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.

    if (m_heap.extraCost > maxExtraCost && m_heap.extraCost > m_heap.usedBlocks * BLOCK_SIZE / 2) {
        // If the last iteration through the heap deallocated blocks, we need
        // to clean up remaining garbage before marking. Otherwise, the conservative
        // marking mechanism might follow a pointer to unmapped memory.
        if (m_heap.didShrink)
            sweep();
        reset();
    }
    m_heap.extraCost += cost;
}
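
// Allocation doubles as the lazy sweep: a dead cell keeps its old contents
// until it is reused here, and the previous occupant's destructor runs just
// before the cell is handed out again.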
void* Heap::allocate(size_t s)
{
    typedef HeapConstants::Block Block;
    typedef HeapConstants::Cell Cell;

    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());
    ASSERT_UNUSED(s, s <= HeapConstants::cellSize);

    ASSERT(m_heap.operationInProgress == NoOperation);

#if COLLECT_ON_EVERY_ALLOCATION
    collectAllGarbage();
    ASSERT(m_heap.operationInProgress == NoOperation);
#endif

allocate:

    // Fast case: find the next garbage cell and recycle it.

    do {
        ASSERT(m_heap.nextBlock < m_heap.usedBlocks);
        Block* block = reinterpret_cast<Block*>(m_heap.blocks[m_heap.nextBlock]);
        do {
            ASSERT(m_heap.nextCell < HeapConstants::cellsPerBlock);
            if (!block->marked.get(m_heap.nextCell)) { // Always false for the last cell in the block
                Cell* cell = block->cells + m_heap.nextCell;

                m_heap.operationInProgress = Allocation;
                JSCell* imp = reinterpret_cast<JSCell*>(cell);
                imp->~JSCell();
                m_heap.operationInProgress = NoOperation;

                ++m_heap.nextCell;
                return cell;
            }
        } while (++m_heap.nextCell != HeapConstants::cellsPerBlock);
        m_heap.nextCell = 0;
    } while (++m_heap.nextBlock != m_heap.usedBlocks);

    // Slow case: reached the end of the heap. Mark live objects and start over.

    reset();
    goto allocate;
}
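
// After a collection, keep the block count between a floor (room for all live
// cells plus a batch of fresh allocations) and a ceiling 25% above that
// floor, so the heap neither thrashes nor balloons.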
void Heap::resizeBlocks()
{
    m_heap.didShrink = false;

    size_t usedCellCount = markedCells();
    size_t minCellCount = usedCellCount + max(ALLOCATIONS_PER_COLLECTION, usedCellCount);
    size_t minBlockCount = (minCellCount + HeapConstants::cellsPerBlock - 1) / HeapConstants::cellsPerBlock;

    size_t maxCellCount = 1.25f * minCellCount;
    size_t maxBlockCount = (maxCellCount + HeapConstants::cellsPerBlock - 1) / HeapConstants::cellsPerBlock;

    if (m_heap.usedBlocks < minBlockCount)
        growBlocks(minBlockCount);
    else if (m_heap.usedBlocks > maxBlockCount)
        shrinkBlocks(maxBlockCount);
}

void Heap::growBlocks(size_t neededBlocks)
{
    ASSERT(m_heap.usedBlocks < neededBlocks);
    while (m_heap.usedBlocks < neededBlocks)
        allocateBlock();
}

void Heap::shrinkBlocks(size_t neededBlocks)
{
    ASSERT(m_heap.usedBlocks > neededBlocks);

    // Clear the always-on last bit, so isEmpty() isn't fooled by it.
    for (size_t i = 0; i < m_heap.usedBlocks; ++i)
        m_heap.blocks[i]->marked.clear(HeapConstants::cellsPerBlock - 1);

    for (size_t i = 0; i != m_heap.usedBlocks && m_heap.usedBlocks != neededBlocks; ) {
        if (m_heap.blocks[i]->marked.isEmpty())
            freeBlock(i);
        else
            ++i;
    }

    // Reset the always-on last bit.
    for (size_t i = 0; i < m_heap.usedBlocks; ++i)
        m_heap.blocks[i]->marked.set(HeapConstants::cellsPerBlock - 1);
}

#if OS(WINCE)
JS_EXPORTDATA void* g_stackBase = 0;

inline bool isPageWritable(void* page)
{
    MEMORY_BASIC_INFORMATION memoryInformation;
    DWORD result = VirtualQuery(page, &memoryInformation, sizeof(memoryInformation));

    // return false on error, including ptr outside memory
    if (result != sizeof(memoryInformation))
        return false;

    DWORD protect = memoryInformation.Protect & ~(PAGE_GUARD | PAGE_NOCACHE);
    return protect == PAGE_READWRITE
        || protect == PAGE_WRITECOPY
        || protect == PAGE_EXECUTE_READWRITE
        || protect == PAGE_EXECUTE_WRITECOPY;
}

static void* getStackBase(void* previousFrame)
{
    // find the address of this stack frame by taking the address of a local variable
    bool isGrowingDownward;
    void* thisFrame = (void*)(&isGrowingDownward);

    isGrowingDownward = previousFrame < &thisFrame;
    static DWORD pageSize = 0;
    if (!pageSize) {
        SYSTEM_INFO systemInfo;
        GetSystemInfo(&systemInfo);
        pageSize = systemInfo.dwPageSize;
    }

    // scan all of memory starting from this frame, and return the last writeable page found
    register char* currentPage = (char*)((DWORD)thisFrame & ~(pageSize - 1));
    if (isGrowingDownward) {
        while (currentPage > 0) {
            // check for underflow
            if (currentPage >= (char*)pageSize)
                currentPage -= pageSize;
            else
                currentPage = 0;
            if (!isPageWritable(currentPage))
                return currentPage + pageSize;
        }
        return 0;
    } else {
        while (true) {
            // guaranteed to complete because isPageWritable returns false at end of memory
            currentPage += pageSize;
            if (!isPageWritable(currentPage))
                return currentPage;
        }
    }
}
#endif

#if OS(QNX)
static inline void* currentThreadStackBaseQNX()
{
    static void* stackBase = 0;
    static size_t stackSize = 0;
    static pthread_t stackThread;
    pthread_t thread = pthread_self();
    if (stackBase == 0 || thread != stackThread) {
        struct _debug_thread_info threadInfo;
        memset(&threadInfo, 0, sizeof(threadInfo));
        threadInfo.tid = pthread_self();
        int fd = open("/proc/self", O_RDONLY);
        if (fd == -1) {
            LOG_ERROR("Unable to open /proc/self (errno: %d)", errno);
            return 0;
        }
        devctl(fd, DCMD_PROC_TIDSTATUS, &threadInfo, sizeof(threadInfo), 0);
        close(fd);
        stackBase = reinterpret_cast<void*>(threadInfo.stkbase);
        stackSize = threadInfo.stksize;
        ASSERT(stackBase);
        stackThread = thread;
    }
    return static_cast<char*>(stackBase) + stackSize;
}
#endif

static inline void* currentThreadStackBase()
{
#if OS(DARWIN)
    pthread_t thread = pthread_self();
    return pthread_get_stackaddr_np(thread);
#elif OS(WINDOWS) && CPU(X86) && COMPILER(MSVC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    NT_TIB* pTib;
    __asm {
        MOV EAX, FS:[18h]
        MOV pTib, EAX
    }
    return static_cast<void*>(pTib->StackBase);
#elif OS(WINDOWS) && CPU(X86) && COMPILER(GCC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    NT_TIB* pTib;
    asm ( "movl %%fs:0x18, %0\n"
          : "=r" (pTib)
        );
    return static_cast<void*>(pTib->StackBase);
#elif OS(WINDOWS) && CPU(X86_64)
    PNT_TIB64 pTib = reinterpret_cast<PNT_TIB64>(NtCurrentTeb());
    return reinterpret_cast<void*>(pTib->StackBase);
#elif OS(QNX)
    AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
    MutexLocker locker(mutex);
    return currentThreadStackBaseQNX();
#elif OS(SOLARIS)
    stack_t s;
    thr_stksegment(&s);
    return s.ss_sp;
#elif OS(OPENBSD)
    pthread_t thread = pthread_self();
    stack_t stack;
    pthread_stackseg_np(thread, &stack);
    return stack.ss_sp;
#elif OS(SYMBIAN)
    TThreadStackInfo info;
    RThread thread;
    thread.StackInfo(info);
    return (void*)info.iBase;
#elif OS(HAIKU)
    thread_info threadInfo;
    get_thread_info(find_thread(NULL), &threadInfo);
    return threadInfo.stack_end;
#elif OS(UNIX)
    AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
    MutexLocker locker(mutex);
    static void* stackBase = 0;
    static size_t stackSize = 0;
    static pthread_t stackThread;
    pthread_t thread = pthread_self();
    if (stackBase == 0 || thread != stackThread) {
        pthread_attr_t sattr;
        pthread_attr_init(&sattr);
#if HAVE(PTHREAD_NP_H) || OS(NETBSD)
        // e.g. on FreeBSD 5.4, neundorf@kde.org
        pthread_attr_get_np(thread, &sattr);
#else
        // FIXME: this function is non-portable; other POSIX systems may have different np alternatives
        pthread_getattr_np(thread, &sattr);
#endif
        int rc = pthread_attr_getstack(&sattr, &stackBase, &stackSize);
        (void)rc; // FIXME: Deal with error code somehow? Seems fatal.
        ASSERT(stackBase);
        pthread_attr_destroy(&sattr);
        stackThread = thread;
    }
    return static_cast<char*>(stackBase) + stackSize;
#elif OS(WINCE)
    AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
    MutexLocker locker(mutex);
    if (g_stackBase)
        return g_stackBase;
    else {
        int dummy;
        return getStackBase(&dummy);
    }
#else
#error Need a way to get the stack base on this platform
#endif
}

#if ENABLE(JSC_MULTIPLE_THREADS)

static inline PlatformThread getCurrentPlatformThread()
{
#if OS(DARWIN)
    return pthread_mach_thread_np(pthread_self());
#elif OS(WINDOWS)
    return pthread_getw32threadhandle_np(pthread_self());
#endif
}

void Heap::makeUsableFromMultipleThreads()
{
    if (m_currentThreadRegistrar)
        return;

    int error = pthread_key_create(&m_currentThreadRegistrar, unregisterThread);
    if (error)
        CRASH();
}
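
// Any thread that may keep JSC values on its stack registers itself here.
// Registration records the thread's stack base so the collector can later
// suspend the thread and scan its stack and registers conservatively; the
// pthread key's destructor unregisters the thread automatically on exit.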
void Heap::registerThread()
{
    ASSERT(!m_globalData->exclusiveThread || m_globalData->exclusiveThread == currentThread());

    if (!m_currentThreadRegistrar || pthread_getspecific(m_currentThreadRegistrar))
        return;

    pthread_setspecific(m_currentThreadRegistrar, this);
    Heap::Thread* thread = new Heap::Thread(pthread_self(), getCurrentPlatformThread(), currentThreadStackBase());

    MutexLocker lock(m_registeredThreadsMutex);

    thread->next = m_registeredThreads;
    m_registeredThreads = thread;
}

void Heap::unregisterThread(void* p)
{
    if (p)
        static_cast<Heap*>(p)->unregisterThread();
}

void Heap::unregisterThread()
{
    pthread_t currentPosixThread = pthread_self();

    MutexLocker lock(m_registeredThreadsMutex);

    if (pthread_equal(currentPosixThread, m_registeredThreads->posixThread)) {
        Thread* t = m_registeredThreads;
        m_registeredThreads = m_registeredThreads->next;
        delete t;
    } else {
        Heap::Thread* last = m_registeredThreads;
        Heap::Thread* t;
        for (t = m_registeredThreads->next; t; t = t->next) {
            if (pthread_equal(t->posixThread, currentPosixThread)) {
                last->next = t->next;
                break;
            }
            last = t;
        }
        ASSERT(t); // If t is NULL, we never found ourselves in the list.
        delete t;
    }
}

#else // ENABLE(JSC_MULTIPLE_THREADS)

void Heap::registerThread()
{
}

#endif

inline bool isPointerAligned(void* p)
{
    return (((intptr_t)(p) & (sizeof(char*) - 1)) == 0);
}

// Cell size needs to be a power of two for isPossibleCell to be valid.
COMPILE_ASSERT(!(sizeof(CollectorCell) & (sizeof(CollectorCell) - 1)), Collector_cell_size_is_power_of_two);

#if USE(JSVALUE32)
static bool isHalfCellAligned(void* p)
{
    return (((intptr_t)(p) & (CELL_MASK >> 1)) == 0);
}

static inline bool isPossibleCell(void* p)
{
    return isHalfCellAligned(p) && p;
}

#else

static inline bool isCellAligned(void* p)
{
    return (((intptr_t)(p) & CELL_MASK) == 0);
}

static inline bool isPossibleCell(void* p)
{
    return isCellAligned(p) && p;
}
#endif // USE(JSVALUE32)
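
// Conservative marking: every pointer-aligned word in [start, end) is treated
// as a possible cell pointer. A candidate is marked only if masking its low
// bits yields the address of a block this heap actually owns, so a stray
// integer can at worst keep garbage alive; it can never corrupt the heap.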
void Heap::markConservatively(MarkStack& markStack, void* start, void* end)
{
    if (start > end) {
        void* tmp = start;
        start = end;
        end = tmp;
    }

    ASSERT((static_cast<char*>(end) - static_cast<char*>(start)) < 0x1000000);
    ASSERT(isPointerAligned(start));
    ASSERT(isPointerAligned(end));

    char** p = static_cast<char**>(start);
    char** e = static_cast<char**>(end);

    CollectorBlock** blocks = m_heap.blocks;
    while (p != e) {
        char* x = *p++;
        if (isPossibleCell(x)) {
            size_t usedBlocks;
            uintptr_t xAsBits = reinterpret_cast<uintptr_t>(x);
            xAsBits &= CELL_ALIGN_MASK;

            uintptr_t offset = xAsBits & BLOCK_OFFSET_MASK;
            const size_t lastCellOffset = sizeof(CollectorCell) * (CELLS_PER_BLOCK - 1);
            if (offset > lastCellOffset)
                continue;

            CollectorBlock* blockAddr = reinterpret_cast<CollectorBlock*>(xAsBits - offset);
            usedBlocks = m_heap.usedBlocks;
            for (size_t block = 0; block < usedBlocks; block++) {
                if (blocks[block] != blockAddr)
                    continue;
                markStack.append(reinterpret_cast<JSCell*>(xAsBits));
                markStack.drain();
            }
        }
    }
}
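
// The internal helper is NEVER_INLINE so it gets its own stack frame: its
// local variable then sits below the caller's setjmp buffer, which keeps the
// registers spilled by setjmp inside the scanned range.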
void NEVER_INLINE Heap::markCurrentThreadConservativelyInternal(MarkStack& markStack)
{
    void* dummy;
    void* stackPointer = &dummy;
    void* stackBase = currentThreadStackBase();
    markConservatively(markStack, stackPointer, stackBase);
}

#if COMPILER(GCC)
#define REGISTER_BUFFER_ALIGNMENT __attribute__ ((aligned (sizeof(void*))))
#else
#define REGISTER_BUFFER_ALIGNMENT
#endif

void Heap::markCurrentThreadConservatively(MarkStack& markStack)
{
    // setjmp forces volatile registers onto the stack
    jmp_buf registers REGISTER_BUFFER_ALIGNMENT;
#if COMPILER(MSVC)
#pragma warning(push)
#pragma warning(disable: 4611)
#endif
    setjmp(registers);
#if COMPILER(MSVC)
#pragma warning(pop)
#endif

    markCurrentThreadConservativelyInternal(markStack);
}

#if ENABLE(JSC_MULTIPLE_THREADS)

static inline void suspendThread(const PlatformThread& platformThread)
{
#if OS(DARWIN)
    thread_suspend(platformThread);
#elif OS(WINDOWS)
    SuspendThread(platformThread);
#else
#error Need a way to suspend threads on this platform
#endif
}

static inline void resumeThread(const PlatformThread& platformThread)
{
#if OS(DARWIN)
    thread_resume(platformThread);
#elif OS(WINDOWS)
    ResumeThread(platformThread);
#else
#error Need a way to resume threads on this platform
#endif
}

typedef unsigned long usword_t; // word size, assumed to be either 32 or 64 bit

#if OS(DARWIN)

#if CPU(X86)
typedef i386_thread_state_t PlatformThreadRegisters;
#elif CPU(X86_64)
typedef x86_thread_state64_t PlatformThreadRegisters;
#elif CPU(PPC)
typedef ppc_thread_state_t PlatformThreadRegisters;
#elif CPU(PPC64)
typedef ppc_thread_state64_t PlatformThreadRegisters;
#elif CPU(ARM)
typedef arm_thread_state_t PlatformThreadRegisters;
#else
#error Unknown Architecture
#endif

#elif OS(WINDOWS) && CPU(X86)
typedef CONTEXT PlatformThreadRegisters;
#else
#error Need a thread register struct for this platform
#endif

static size_t getPlatformThreadRegisters(const PlatformThread& platformThread, PlatformThreadRegisters& regs)
{
#if OS(DARWIN)

#if CPU(X86)
    unsigned user_count = sizeof(regs)/sizeof(int);
    thread_state_flavor_t flavor = i386_THREAD_STATE;
#elif CPU(X86_64)
    unsigned user_count = x86_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = x86_THREAD_STATE64;
#elif CPU(PPC)
    unsigned user_count = PPC_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE;
#elif CPU(PPC64)
    unsigned user_count = PPC_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE64;
#elif CPU(ARM)
    unsigned user_count = ARM_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = ARM_THREAD_STATE;
#else
#error Unknown Architecture
#endif

    kern_return_t result = thread_get_state(platformThread, flavor, (thread_state_t)&regs, &user_count);
    if (result != KERN_SUCCESS) {
        WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION,
                            "JavaScript garbage collection failed because thread_get_state returned an error (%d). This is probably the result of running inside Rosetta, which is not supported.", result);
        CRASH();
    }
    return user_count * sizeof(usword_t);
// end OS(DARWIN)

#elif OS(WINDOWS) && CPU(X86)
    regs.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL | CONTEXT_SEGMENTS;
    GetThreadContext(platformThread, &regs);
    return sizeof(CONTEXT);
#else
#error Need a way to get thread registers on this platform
#endif
}

static inline void* otherThreadStackPointer(const PlatformThreadRegisters& regs)
{
#if OS(DARWIN)

#if __DARWIN_UNIX03

#if CPU(X86)
    return reinterpret_cast<void*>(regs.__esp);
#elif CPU(X86_64)
    return reinterpret_cast<void*>(regs.__rsp);
#elif CPU(PPC) || CPU(PPC64)
    return reinterpret_cast<void*>(regs.__r1);
#elif CPU(ARM)
    return reinterpret_cast<void*>(regs.__sp);
#else
#error Unknown Architecture
#endif

#else // !__DARWIN_UNIX03

#if CPU(X86)
    return reinterpret_cast<void*>(regs.esp);
#elif CPU(X86_64)
    return reinterpret_cast<void*>(regs.rsp);
#elif CPU(PPC) || CPU(PPC64)
    return reinterpret_cast<void*>(regs.r1);
#else
#error Unknown Architecture
#endif

#endif // __DARWIN_UNIX03

// end OS(DARWIN)
#elif CPU(X86) && OS(WINDOWS)
    return reinterpret_cast<void*>((uintptr_t) regs.Esp);
#else
#error Need a way to get the stack pointer for another thread on this platform
#endif
}
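
// While the target thread is suspended we scan both its saved register file
// and its stack, since a live cell pointer may exist only in a register at
// the moment of suspension.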
void Heap::markOtherThreadConservatively(MarkStack& markStack, Thread* thread)
{
    suspendThread(thread->platformThread);

    PlatformThreadRegisters regs;
    size_t regSize = getPlatformThreadRegisters(thread->platformThread, regs);

    // mark the thread's registers
    markConservatively(markStack, static_cast<void*>(&regs), static_cast<void*>(reinterpret_cast<char*>(&regs) + regSize));

    void* stackPointer = otherThreadStackPointer(regs);
    markConservatively(markStack, stackPointer, thread->stackBase);

    resumeThread(thread->platformThread);
}

#endif

void Heap::markStackObjectsConservatively(MarkStack& markStack)
{
    markCurrentThreadConservatively(markStack);

#if ENABLE(JSC_MULTIPLE_THREADS)

    if (m_currentThreadRegistrar) {

        MutexLocker lock(m_registeredThreadsMutex);

#ifndef NDEBUG
        // Forbid malloc during the mark phase. Marking a thread suspends it, so
        // a malloc inside markChildren() would risk a deadlock with a thread that had been
        // suspended while holding the malloc lock.
        fastMallocForbid();
#endif
        // It is safe to access the registeredThreads list, because we earlier asserted that locks are being held,
        // and since this is a shared heap, they are real locks.
        for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
            if (!pthread_equal(thread->posixThread, pthread_self()))
                markOtherThreadConservatively(markStack, thread);
        }
#ifndef NDEBUG
        fastMallocAllow();
#endif
    }
#endif
}
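
// m_protectedValues is a counted set: a value protected twice must be
// unprotected twice before it stops acting as a GC root.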
void Heap::protect(JSValue k)
{
    ASSERT(k);
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance());

    if (!k.isCell())
        return;

    m_protectedValues.add(k.asCell());
}

bool Heap::unprotect(JSValue k)
{
    ASSERT(k);
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance());

    if (!k.isCell())
        return false;

    return m_protectedValues.remove(k.asCell());
}

void Heap::markProtectedObjects(MarkStack& markStack)
{
    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it) {
        markStack.append(it->first);
        markStack.drain();
    }
}

void Heap::clearMarkBits()
{
    for (size_t i = 0; i < m_heap.usedBlocks; ++i)
        clearMarkBits(m_heap.blocks[i]);
}

void Heap::clearMarkBits(CollectorBlock* block)
{
    // allocate assumes that the last cell in every block is marked.
    block->marked.clearAll();
    block->marked.set(HeapConstants::cellsPerBlock - 1);
}

size_t Heap::markedCells(size_t startBlock, size_t startCell) const
{
    ASSERT(startBlock <= m_heap.usedBlocks);
    ASSERT(startCell < HeapConstants::cellsPerBlock);

    if (startBlock >= m_heap.usedBlocks)
        return 0;

    size_t result = 0;
    result += m_heap.blocks[startBlock]->marked.count(startCell);
    for (size_t i = startBlock + 1; i < m_heap.usedBlocks; ++i)
        result += m_heap.blocks[i]->marked.count();

    return result;
}
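
// sweep() destructs every dead cell eagerly. With JSC_ZOMBIES enabled, each
// dead cell is replaced by a JSZombie so stale references can be caught;
// otherwise it becomes an inert cell that is still safe to mark.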
void Heap::sweep()
{
    ASSERT(m_heap.operationInProgress == NoOperation);
    if (m_heap.operationInProgress != NoOperation)
        CRASH();
    m_heap.operationInProgress = Collection;

#if !ENABLE(JSC_ZOMBIES)
    Structure* dummyMarkableCellStructure = m_globalData->dummyMarkableCellStructure.get();
#endif

    DeadObjectIterator it(m_heap, m_heap.nextBlock, m_heap.nextCell);
    DeadObjectIterator end(m_heap, m_heap.usedBlocks);
    for ( ; it != end; ++it) {
        JSCell* cell = *it;
#if ENABLE(JSC_ZOMBIES)
        if (!cell->isZombie()) {
            const ClassInfo* info = cell->classInfo();
            cell->~JSCell();
            new (cell) JSZombie(info, JSZombie::leakedZombieStructure());
            Heap::markCell(cell);
        }
#else
        cell->~JSCell();
        // Callers of sweep assume it's safe to mark any cell in the heap.
        new (cell) JSCell(dummyMarkableCellStructure);
#endif
    }

    m_heap.operationInProgress = NoOperation;
}

void Heap::markRoots()
{
#ifndef NDEBUG
    if (m_globalData->isSharedInstance()) {
        ASSERT(JSLock::lockCount() > 0);
        ASSERT(JSLock::currentThreadIsHoldingLock());
    }
#endif

    ASSERT(m_heap.operationInProgress == NoOperation);
    if (m_heap.operationInProgress != NoOperation)
        CRASH();

    m_heap.operationInProgress = Collection;

    MarkStack& markStack = m_globalData->markStack;

    // Reset mark bits.
    clearMarkBits();

    // Mark stack roots.
    markStackObjectsConservatively(markStack);
    m_globalData->interpreter->registerFile().markCallFrames(markStack, this);

    // Mark explicitly registered roots.
    markProtectedObjects(markStack);

    // Mark misc. other roots.
    if (m_markListSet && m_markListSet->size())
        MarkedArgumentBuffer::markLists(markStack, *m_markListSet);
    if (m_globalData->exception)
        markStack.append(m_globalData->exception);
    if (m_globalData->functionCodeBlockBeingReparsed)
        m_globalData->functionCodeBlockBeingReparsed->markAggregate(markStack);
    if (m_globalData->firstStringifierToMark)
        JSONObject::markStringifiers(markStack, m_globalData->firstStringifierToMark);

    // Mark the small strings cache last, since it will clear itself if nothing
    // else has marked it.
    m_globalData->smallStrings.markChildren(markStack);

    markStack.drain();
    markStack.compact();

    m_heap.operationInProgress = NoOperation;
}

size_t Heap::objectCount() const
{
    return m_heap.nextBlock * HeapConstants::cellsPerBlock // allocated full blocks
           + m_heap.nextCell // allocated cells in current block
           + markedCells(m_heap.nextBlock, m_heap.nextCell) // marked cells in remainder of m_heap
           - m_heap.usedBlocks; // 1 cell per block is a dummy sentinel
}

void Heap::addToStatistics(Heap::Statistics& statistics) const
{
    statistics.size += m_heap.usedBlocks * BLOCK_SIZE;
    statistics.free += m_heap.usedBlocks * BLOCK_SIZE - (objectCount() * HeapConstants::cellSize);
}

Heap::Statistics Heap::statistics() const
{
    Statistics statistics = { 0, 0 };
    addToStatistics(statistics);
    return statistics;
}

size_t Heap::globalObjectCount()
{
    size_t count = 0;
    if (JSGlobalObject* head = m_globalData->head) {
        JSGlobalObject* o = head;
        do {
            ++count;
            o = o->next();
        } while (o != head);
    }
    return count;
}

size_t Heap::protectedGlobalObjectCount()
{
    size_t count = 0;
    if (JSGlobalObject* head = m_globalData->head) {
        JSGlobalObject* o = head;
        do {
            if (m_protectedValues.contains(o))
                ++count;
            o = o->next();
        } while (o != head);
    }

    return count;
}

size_t Heap::protectedObjectCount()
{
    return m_protectedValues.size();
}

static const char* typeName(JSCell* cell)
{
    if (cell->isString())
        return "string";
#if USE(JSVALUE32)
    if (cell->isNumber())
        return "number";
#endif
    if (cell->isGetterSetter())
        return "Getter-Setter";
    if (cell->isAPIValueWrapper())
        return "API wrapper";
    if (cell->isPropertyNameIterator())
        return "For-in iterator";
    if (!cell->isObject())
        return "[empty cell]";
    const ClassInfo* info = cell->classInfo();
    return info ? info->className : "Object";
}
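
// Both type-count functions below return a freshly allocated HashCountedSet
// that the caller owns and must delete.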
HashCountedSet<const char*>* Heap::protectedObjectTypeCounts()
{
    HashCountedSet<const char*>* counts = new HashCountedSet<const char*>;

    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
        counts->add(typeName(it->first));

    return counts;
}

HashCountedSet<const char*>* Heap::objectTypeCounts()
{
    HashCountedSet<const char*>* counts = new HashCountedSet<const char*>;

    LiveObjectIterator it = primaryHeapBegin();
    LiveObjectIterator heapEnd = primaryHeapEnd();
    for ( ; it != heapEnd; ++it)
        counts->add(typeName(*it));

    return counts;
}

bool Heap::isBusy()
{
    return m_heap.operationInProgress != NoOperation;
}

void Heap::reset()
{
    JAVASCRIPTCORE_GC_BEGIN();

    markRoots();

    JAVASCRIPTCORE_GC_MARKED();

    m_heap.nextCell = 0;
    m_heap.nextBlock = 0;
    m_heap.nextNumber = 0;
    m_heap.extraCost = 0;
#if ENABLE(JSC_ZOMBIES)
    sweep();
#endif
    resizeBlocks();

    JAVASCRIPTCORE_GC_END();
}
1234 | ||
1235 | void Heap::collectAllGarbage() | |
1236 | { | |
1237 | JAVASCRIPTCORE_GC_BEGIN(); | |
1238 | ||
1239 | // If the last iteration through the heap deallocated blocks, we need | |
1240 | // to clean up remaining garbage before marking. Otherwise, the conservative | |
1241 | // marking mechanism might follow a pointer to unmapped memory. | |
1242 | if (m_heap.didShrink) | |
1243 | sweep(); | |
1244 | ||
1245 | markRoots(); | |
1246 | ||
1247 | JAVASCRIPTCORE_GC_MARKED(); | |
1248 | ||
1249 | m_heap.nextCell = 0; | |
1250 | m_heap.nextBlock = 0; | |
1251 | m_heap.nextNumber = 0; | |
1252 | m_heap.extraCost = 0; | |
1253 | sweep(); | |
1254 | resizeBlocks(); | |
1255 | ||
1256 | JAVASCRIPTCORE_GC_END(); | |
9dae56ea A |
1257 | } |
1258 | ||

LiveObjectIterator Heap::primaryHeapBegin()
{
    return LiveObjectIterator(m_heap, 0);
}

LiveObjectIterator Heap::primaryHeapEnd()
{
    return LiveObjectIterator(m_heap, m_heap.usedBlocks);
}

} // namespace JSC