/*
 * Copyright (C) 2009, 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "GCThreadSharedData.h"

#include "CopyVisitor.h"
#include "CopyVisitorInlines.h"
#include "GCThread.h"
#include "JSCInlines.h"
#include "MarkStack.h"
#include "SlotVisitor.h"
#include "SlotVisitorInlines.h"
#include "VM.h"

namespace JSC {

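// Helpers that operate over the child GC threads' SlotVisitors: resetting them and summing
// their visit and byte counters.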
#if ENABLE(PARALLEL_GC)
void GCThreadSharedData::resetChildren()
{
    for (size_t i = 0; i < m_gcThreads.size(); ++i)
        m_gcThreads[i]->slotVisitor()->reset();
}

size_t GCThreadSharedData::childVisitCount()
{
    size_t result = 0;
    for (unsigned i = 0; i < m_gcThreads.size(); ++i)
        result += m_gcThreads[i]->slotVisitor()->visitCount();
    return result;
}

size_t GCThreadSharedData::childBytesVisited()
{
    size_t result = 0;
    for (unsigned i = 0; i < m_gcThreads.size(); ++i)
        result += m_gcThreads[i]->slotVisitor()->bytesVisited();
    return result;
}

size_t GCThreadSharedData::childBytesCopied()
{
    size_t result = 0;
    for (unsigned i = 0; i < m_gcThreads.size(); ++i)
        result += m_gcThreads[i]->slotVisitor()->bytesCopied();
    return result;
}
#endif

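// Spawns the helper GCThreads (one fewer than Options::numberOfGCMarkers(), since the main
// thread also participates as a marker) and blocks until each helper has checked in as idle.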
GCThreadSharedData::GCThreadSharedData(VM* vm)
    : m_vm(vm)
    , m_copiedSpace(&vm->heap.m_storageSpace)
    , m_shouldHashCons(false)
    , m_sharedMarkStack()
    , m_numberOfActiveParallelMarkers(0)
    , m_parallelMarkersShouldExit(false)
    , m_copyIndex(0)
    , m_numberOfActiveGCThreads(0)
    , m_gcThreadsShouldWait(false)
    , m_currentPhase(NoPhase)
{
#if ENABLE(PARALLEL_GC)
    // Grab the lock so the new GC threads can be properly initialized before they start running.
    std::unique_lock<std::mutex> lock(m_phaseMutex);
    for (unsigned i = 1; i < Options::numberOfGCMarkers(); ++i) {
        m_numberOfActiveGCThreads++;
        GCThread* newThread = new GCThread(*this, std::make_unique<SlotVisitor>(*this), std::make_unique<CopyVisitor>(*this));
        ThreadIdentifier threadID = createThread(GCThread::gcThreadStartFunc, newThread, "JavaScriptCore::Marking");
        newThread->initializeThreadID(threadID);
        m_gcThreads.append(newThread);
    }

    // Wait for all the GCThreads to get to the right place.
    m_activityConditionVariable.wait(lock, [this] { return !m_numberOfActiveGCThreads; });
#endif
}

GCThreadSharedData::~GCThreadSharedData()
{
#if ENABLE(PARALLEL_GC)
    // Destroy our marking threads.
    {
        std::lock_guard<std::mutex> markingLock(m_markingMutex);
        std::lock_guard<std::mutex> phaseLock(m_phaseMutex);
        ASSERT(m_currentPhase == NoPhase);
        m_parallelMarkersShouldExit = true;
        m_gcThreadsShouldWait = false;
        m_currentPhase = Exit;
        m_phaseConditionVariable.notify_all();
    }
    for (unsigned i = 0; i < m_gcThreads.size(); ++i) {
        waitForThreadCompletion(m_gcThreads[i]->threadID());
        delete m_gcThreads[i];
    }
#endif
}

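// Clears per-collection state: the registered weak reference harvesters and the
// hash-consing flag for newly created strings. The shared mark stack must already be drained.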
void GCThreadSharedData::reset()
{
    ASSERT(m_sharedMarkStack.isEmpty());

    m_weakReferenceHarvesters.removeAll();

    if (m_shouldHashCons) {
        m_vm->resetNewStringsSinceLastHashCons();
        m_shouldHashCons = false;
    }
}

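// Publishes the next GC phase under the phase lock and wakes the helper threads so they
// can start working on it.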
void GCThreadSharedData::startNextPhase(GCPhase phase)
{
    std::lock_guard<std::mutex> lock(m_phaseMutex);
    ASSERT(!m_gcThreadsShouldWait);
    ASSERT(m_currentPhase == NoPhase);
    m_gcThreadsShouldWait = true;
    m_currentPhase = phase;
    m_phaseConditionVariable.notify_all();
}

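// Clears the current phase, releases the helper threads from their phase loop, and waits
// until every helper has reported back as idle before returning to the caller.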
void GCThreadSharedData::endCurrentPhase()
{
    ASSERT(m_gcThreadsShouldWait);
    std::unique_lock<std::mutex> lock(m_phaseMutex);
    m_currentPhase = NoPhase;
    m_gcThreadsShouldWait = false;
    m_phaseConditionVariable.notify_all();
    m_activityConditionVariable.wait(lock, [this] { return !m_numberOfActiveGCThreads; });
}

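// A full collection starts from an empty set of opaque roots; an Eden collection keeps the
// roots recorded by earlier cycles. Either way, marking begins with the exit flag cleared.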
void GCThreadSharedData::didStartMarking()
{
    if (m_vm->heap.operationInProgress() == FullCollection) {
#if ENABLE(PARALLEL_GC)
        m_opaqueRoots.clear();
#else
        ASSERT(m_opaqueRoots.isEmpty());
#endif
    }
    std::lock_guard<std::mutex> lock(m_markingMutex);
    m_parallelMarkersShouldExit = false;
    startNextPhase(Mark);
}

void GCThreadSharedData::didFinishMarking()
{
    {
        std::lock_guard<std::mutex> lock(m_markingMutex);
        m_parallelMarkersShouldExit = true;
        m_markingConditionVariable.notify_all();
    }

    ASSERT(m_currentPhase == Mark);
    endCurrentPhase();
}

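// Gathers the set of blocks to evacuate (only the new generation's from-space for an Eden
// collection, every block in the copied space for a full collection) before kicking off the
// Copy phase on the helper threads.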
void GCThreadSharedData::didStartCopying()
{
    {
        SpinLockHolder locker(&m_copyLock);
        if (m_vm->heap.operationInProgress() == EdenCollection) {
            // Reset the vector to be empty, but don't throw away the backing store.
            m_blocksToCopy.shrink(0);
            for (CopiedBlock* block = m_copiedSpace->m_newGen.fromSpace->head(); block; block = block->next())
                m_blocksToCopy.append(block);
        } else {
            ASSERT(m_vm->heap.operationInProgress() == FullCollection);
            WTF::copyToVector(m_copiedSpace->m_blockSet, m_blocksToCopy);
        }
        m_copyIndex = 0;
    }

    // We do this here so that we avoid a race condition where the main thread can
    // blow through all of the copying work before the GCThreads fully wake up.
    // The GCThreads then request a block from the CopiedSpace when the copying phase
    // has completed, which isn't allowed.
    for (size_t i = 0; i < m_gcThreads.size(); i++)
        m_gcThreads[i]->copyVisitor()->startCopying();

    startNextPhase(Copy);
}

void GCThreadSharedData::didFinishCopying()
{
    ASSERT(m_currentPhase == Copy);
    endCurrentPhase();
}

} // namespace JSC