diff --git a/heap/MarkedAllocator.cpp b/heap/MarkedAllocator.cpp
index cbdbfd5321a22c4fd4f4d4cdb03f1409aaeacb09..b8f01fd187920f36053a948adf6009468839c3ea 100644
--- a/heap/MarkedAllocator.cpp
+++ b/heap/MarkedAllocator.cpp
@@ -1,18 +1,45 @@
+/*
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
 #include "config.h"
 #include "MarkedAllocator.h"
 
+#include "DelayedReleaseScope.h"
 #include "GCActivityCallback.h"
 #include "Heap.h"
 #include "IncrementalSweeper.h"
+#include "JSCInlines.h"
 #include "VM.h"
 #include <wtf/CurrentTime.h>
 
 namespace JSC {
 
-bool MarkedAllocator::isPagedOut(double deadline)
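+// Probe whether a block list is paged out by walking it: if merely touching
+// each block header cannot keep up with the deadline, treat the list as
+// paged out.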
+static bool isListPagedOut(double deadline, DoublyLinkedList<MarkedBlock>& list)
 {
     unsigned itersSinceLastTimeCheck = 0;
-    MarkedBlock* block = m_blockList.head();
+    MarkedBlock* block = list.head();
     while (block) {
         block = block->next();
         ++itersSinceLastTimeCheck;
@@ -23,22 +50,46 @@ bool MarkedAllocator::isPagedOut(double deadline)
             itersSinceLastTimeCheck = 0;
         }
     }
+    return false;
+}
 
+bool MarkedAllocator::isPagedOut(double deadline)
+{
+    if (isListPagedOut(deadline, m_blockList))
+        return true;
     return false;
 }
 
 inline void* MarkedAllocator::tryAllocateHelper(size_t bytes)
 {
-    if (!m_freeList.head) {
-        for (MarkedBlock*& block = m_blocksToSweep; block; block = block->next()) {
+    // We need a while loop to check the free list because the DelayedReleaseScope 
+    // could cause arbitrary code to execute and exhaust the free list that we 
+    // thought had elements in it.
+    while (!m_freeList.head) {
+        DelayedReleaseScope delayedReleaseScope(*m_markedSpace);
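+        // If we had a current block, its free list is now exhausted; tell it
+        // so, and advance the sweep cursor past it.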
+        if (m_currentBlock) {
+            ASSERT(m_currentBlock == m_nextBlockToSweep);
+            m_currentBlock->didConsumeFreeList();
+            m_nextBlockToSweep = m_currentBlock->next();
+        }
+
+        MarkedBlock* next;
+        for (MarkedBlock*& block = m_nextBlockToSweep; block; block = next) {
+            next = block->next();
+
             MarkedBlock::FreeList freeList = block->sweep(MarkedBlock::SweepToFreeList);
-            if (!freeList.head) {
-                block->didConsumeFreeList();
+            
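+            // Blocks that come back from the sweep still densely occupied are
+            // not worth re-sweeping soon: retire them until a full collection
+            // (see reset()) returns them to m_blockList.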
+            double utilization = ((double)MarkedBlock::blockSize - (double)freeList.bytes) / (double)MarkedBlock::blockSize;
+            if (utilization >= Options::minMarkedBlockUtilization()) {
+                ASSERT(freeList.bytes || !freeList.head);
+                m_blockList.remove(block);
+                m_retiredBlocks.push(block);
+                block->didRetireBlock(freeList);
                 continue;
             }
 
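+            // This block's cells are too small for the request; hand the free
+            // list back to the block and keep looking.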
             if (bytes > block->cellSize()) {
-                block->canonicalizeCellLivenessData(freeList);
+                block->stopAllocating(freeList);
                 continue;
             }
 
@@ -52,30 +103,68 @@ inline void* MarkedAllocator::tryAllocateHelper(size_t bytes)
             return 0;
         }
     }
-    
+
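+    // Falling out of the loop means the sweep produced a non-empty free
+    // list; pop a cell and record the allocation against the current block.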
+    ASSERT(m_freeList.head);
+    void* head = tryPopFreeList(bytes);
+    ASSERT(head);
+    m_markedSpace->didAllocateInBlock(m_currentBlock);
+    return head;
+}
+
+inline void* MarkedAllocator::tryPopFreeList(size_t bytes)
+{
+    ASSERT(m_currentBlock);
+    if (bytes > m_currentBlock->cellSize())
+        return 0;
+
     MarkedBlock::FreeCell* head = m_freeList.head;
     m_freeList.head = head->next;
-    ASSERT(head);
     return head;
 }
-    
+
 inline void* MarkedAllocator::tryAllocate(size_t bytes)
 {
     ASSERT(!m_heap->isBusy());
     m_heap->m_operationInProgress = Allocation;
     void* result = tryAllocateHelper(bytes);
+
+    // Due to the DelayedReleaseScope in tryAllocateHelper, some other thread might have
+    // created a new block after we thought we didn't find any free cells. 
+    while (!result && m_currentBlock) {
+        // A new block was added by another thread so try popping the free list.
+        result = tryPopFreeList(bytes);
+        if (result)
+            break;
+        // The free list was empty, so call tryAllocateHelper to do the normal sweeping stuff.
+        result = tryAllocateHelper(bytes);
+    }
+
     m_heap->m_operationInProgress = NoOperation;
+    ASSERT(result || !m_currentBlock);
     return result;
 }
-    
+
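+// Stress-testing aid: when Options::slowPathAllocsBetweenGCs() is nonzero,
+// force a full collection once every that many slow-path allocations.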
+ALWAYS_INLINE void MarkedAllocator::doTestCollectionsIfNeeded()
+{
+    if (!Options::slowPathAllocsBetweenGCs())
+        return;
+
+    static unsigned allocationCount = 0;
+    if (!allocationCount) {
+        if (!m_heap->isDeferred())
+            m_heap->collectAllGarbage();
+        ASSERT(m_heap->m_operationInProgress == NoOperation);
+    }
+    if (++allocationCount >= Options::slowPathAllocsBetweenGCs())
+        allocationCount = 0;
+}
+
 void* MarkedAllocator::allocateSlowCase(size_t bytes)
 {
-    ASSERT(m_heap->vm()->apiLock().currentThreadIsHoldingLock());
-#if COLLECT_ON_EVERY_ALLOCATION
-    m_heap->collectAllGarbage();
-    ASSERT(m_heap->m_operationInProgress == NoOperation);
-#endif
-    
+    ASSERT(m_heap->vm()->currentThreadIsHoldingAPILock());
+    doTestCollectionsIfNeeded();
+
+    ASSERT(!m_markedSpace->isIterating());
     ASSERT(!m_freeList.head);
     m_heap->didAllocate(m_freeList.bytes);
     
@@ -84,9 +173,7 @@ void* MarkedAllocator::allocateSlowCase(size_t bytes)
     if (LIKELY(result != 0))
         return result;
     
-    if (m_heap->shouldCollect()) {
-        m_heap->collect(Heap::DoNotSweep);
-
+    if (m_heap->collectIfNecessaryOrDefer()) {
         result = tryAllocate(bytes);
         if (result)
             return result;
@@ -122,8 +209,7 @@ void MarkedAllocator::addBlock(MarkedBlock* block)
     ASSERT(!m_freeList.head);
     
     m_blockList.append(block);
-    m_blocksToSweep = m_currentBlock = block;
-    m_freeList = block->sweep(MarkedBlock::SweepToFreeList);
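+    // The new block is no longer swept eagerly here; tryAllocateHelper will
+    // sweep it once the cursor reaches it through m_nextBlockToSweep.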
+    m_nextBlockToSweep = block;
     m_markedSpace->didAddBlock(block);
 }
 
@@ -133,9 +219,33 @@ void MarkedAllocator::removeBlock(MarkedBlock* block)
         m_currentBlock = m_currentBlock->next();
         m_freeList = MarkedBlock::FreeList();
     }
-    if (m_blocksToSweep == block)
-        m_blocksToSweep = m_blocksToSweep->next();
+    if (m_nextBlockToSweep == block)
+        m_nextBlockToSweep = m_nextBlockToSweep->next();
+
+    block->willRemoveBlock();
     m_blockList.remove(block);
 }
 
+void MarkedAllocator::reset()
+{
+    m_lastActiveBlock = 0;
+    m_currentBlock = 0;
+    m_freeList = MarkedBlock::FreeList();
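+    // A full collection re-marks every block from scratch, so blocks retired
+    // as densely utilized may have free cells again; return them to the
+    // sweep list.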
+    if (m_heap->operationInProgress() == FullCollection)
+        m_blockList.append(m_retiredBlocks);
+
+    m_nextBlockToSweep = m_blockList.head();
+}
+
+struct LastChanceToFinalize : MarkedBlock::VoidFunctor {
+    void operator()(MarkedBlock* block) { block->lastChanceToFinalize(); }
+};
+
+void MarkedAllocator::lastChanceToFinalize()
+{
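+    // Retired blocks sit outside m_blockList, so fold them back in before
+    // finalizing every block.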
+    m_blockList.append(m_retiredBlocks);
+    LastChanceToFinalize functor;
+    forEachBlock(functor);
+}
+
 } // namespace JSC