git.saurik.com Git - apple/javascriptcore.git/blobdiff - heap/GCThreadSharedData.cpp
JavaScriptCore-7600.1.4.13.1.tar.gz
[apple/javascriptcore.git] / heap / GCThreadSharedData.cpp
index b39ab5763f2d13d4b388dc849044cd3b1aa15d84..27e59e8ec90204655d3cf2dab2fec75351fdd7b1 100644 (file)
 #include "CopyVisitor.h"
 #include "CopyVisitorInlines.h"
 #include "GCThread.h"
-#include "VM.h"
 #include "MarkStack.h"
+#include "JSCInlines.h"
 #include "SlotVisitor.h"
 #include "SlotVisitorInlines.h"
+#include "VM.h"
 
 namespace JSC {
 
@@ -50,6 +51,22 @@ size_t GCThreadSharedData::childVisitCount()
         result += m_gcThreads[i]->slotVisitor()->visitCount();
     return result;
 }
+
+size_t GCThreadSharedData::childBytesVisited()
+{       
+    size_t result = 0;
+    for (unsigned i = 0; i < m_gcThreads.size(); ++i)
+        result += m_gcThreads[i]->slotVisitor()->bytesVisited();
+    return result;
+}
+
+size_t GCThreadSharedData::childBytesCopied()
+{       
+    size_t result = 0;
+    for (unsigned i = 0; i < m_gcThreads.size(); ++i)
+        result += m_gcThreads[i]->slotVisitor()->bytesCopied();
+    return result;
+}
 #endif
 
 GCThreadSharedData::GCThreadSharedData(VM* vm)
@@ -67,7 +84,7 @@ GCThreadSharedData::GCThreadSharedData(VM* vm)
     m_copyLock.Init();
 #if ENABLE(PARALLEL_GC)
     // Grab the lock so the new GC threads can be properly initialized before they start running.
-    MutexLocker locker(m_phaseLock);
+    std::unique_lock<std::mutex> lock(m_phaseMutex);
     for (unsigned i = 1; i < Options::numberOfGCMarkers(); ++i) {
         m_numberOfActiveGCThreads++;
         SlotVisitor* slotVisitor = new SlotVisitor(*this);
@@ -79,8 +96,7 @@ GCThreadSharedData::GCThreadSharedData(VM* vm)
     }
 
     // Wait for all the GCThreads to get to the right place.
-    while (m_numberOfActiveGCThreads)
-        m_activityCondition.wait(m_phaseLock);
+    m_activityConditionVariable.wait(lock, [this] { return !m_numberOfActiveGCThreads; });
 #endif
 }
 
@@ -89,13 +105,13 @@ GCThreadSharedData::~GCThreadSharedData()
 #if ENABLE(PARALLEL_GC)    
     // Destroy our marking threads.
     {
-        MutexLocker markingLocker(m_markingLock);
-        MutexLocker phaseLocker(m_phaseLock);
+        std::lock_guard<std::mutex> markingLock(m_markingMutex);
+        std::lock_guard<std::mutex> phaseLock(m_phaseMutex);
         ASSERT(m_currentPhase == NoPhase);
         m_parallelMarkersShouldExit = true;
         m_gcThreadsShouldWait = false;
         m_currentPhase = Exit;
-        m_phaseCondition.broadcast();
+        m_phaseConditionVariable.notify_all();
     }
     for (unsigned i = 0; i < m_gcThreads.size(); ++i) {
         waitForThreadCompletion(m_gcThreads[i]->threadID());
@@ -103,16 +119,11 @@ GCThreadSharedData::~GCThreadSharedData()
     }
 #endif
 }
-    
+
 void GCThreadSharedData::reset()
 {
     ASSERT(m_sharedMarkStack.isEmpty());
     
-#if ENABLE(PARALLEL_GC)
-    m_opaqueRoots.clear();
-#else
-    ASSERT(m_opaqueRoots.isEmpty());
-#endif
     m_weakReferenceHarvesters.removeAll();
 
     if (m_shouldHashCons) {
@@ -123,28 +134,34 @@ void GCThreadSharedData::reset()
 
 void GCThreadSharedData::startNextPhase(GCPhase phase)
 {
-    MutexLocker phaseLocker(m_phaseLock);
+    std::lock_guard<std::mutex> lock(m_phaseMutex);
     ASSERT(!m_gcThreadsShouldWait);
     ASSERT(m_currentPhase == NoPhase);
     m_gcThreadsShouldWait = true;
     m_currentPhase = phase;
-    m_phaseCondition.broadcast();
+    m_phaseConditionVariable.notify_all();
 }
 
 void GCThreadSharedData::endCurrentPhase()
 {
     ASSERT(m_gcThreadsShouldWait);
-    MutexLocker locker(m_phaseLock);
+    std::unique_lock<std::mutex> lock(m_phaseMutex);
     m_currentPhase = NoPhase;
     m_gcThreadsShouldWait = false;
-    m_phaseCondition.broadcast();
-    while (m_numberOfActiveGCThreads)
-        m_activityCondition.wait(m_phaseLock);
+    m_phaseConditionVariable.notify_all();
+    m_activityConditionVariable.wait(lock, [this] { return !m_numberOfActiveGCThreads; });
 }
 
 void GCThreadSharedData::didStartMarking()
 {
-    MutexLocker markingLocker(m_markingLock);
+    if (m_vm->heap.operationInProgress() == FullCollection) {
+#if ENABLE(PARALLEL_GC)
+        m_opaqueRoots.clear();
+#else
+        ASSERT(m_opaqueRoots.isEmpty());
+#endif
+    }
+    std::lock_guard<std::mutex> lock(m_markingMutex);
     m_parallelMarkersShouldExit = false;
     startNextPhase(Mark);
 }
@@ -152,9 +169,9 @@ void GCThreadSharedData::didStartMarking()
 void GCThreadSharedData::didFinishMarking()
 {
     {
-        MutexLocker markingLocker(m_markingLock);
+        std::lock_guard<std::mutex> lock(m_markingMutex);
         m_parallelMarkersShouldExit = true;
-        m_markingCondition.broadcast();
+        m_markingConditionVariable.notify_all();
     }
 
     ASSERT(m_currentPhase == Mark);
@@ -165,7 +182,15 @@ void GCThreadSharedData::didStartCopying()
 {
     {
         SpinLockHolder locker(&m_copyLock);
-        WTF::copyToVector(m_copiedSpace->m_blockSet, m_blocksToCopy);
+        if (m_vm->heap.operationInProgress() == EdenCollection) {
+            // Reset the vector to be empty, but don't throw away the backing store.
+            m_blocksToCopy.shrink(0);
+            for (CopiedBlock* block = m_copiedSpace->m_newGen.fromSpace->head(); block; block = block->next())
+                m_blocksToCopy.append(block);
+        } else {
+            ASSERT(m_vm->heap.operationInProgress() == FullCollection);
+            WTF::copyToVector(m_copiedSpace->m_blockSet, m_blocksToCopy);
+        }
         m_copyIndex = 0;
     }