]> git.saurik.com Git - apple/javascriptcore.git/blob - runtime/JSLock.cpp
JavaScriptCore-7601.1.46.3.tar.gz
[apple/javascriptcore.git] / runtime / JSLock.cpp
1 /*
2 * Copyright (C) 2005, 2008, 2012, 2014 Apple Inc. All rights reserved.
3 *
4 * This library is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Library General Public
6 * License as published by the Free Software Foundation; either
7 * version 2 of the License, or (at your option) any later version.
8 *
9 * This library is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Library General Public License for more details.
13 *
14 * You should have received a copy of the GNU Library General Public License
15 * along with this library; see the file COPYING.LIB. If not, write to
16 * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
17 * Boston, MA 02110-1301, USA
18 *
19 */
20
21 #include "config.h"
22 #include "JSLock.h"
23
24 #include "Heap.h"
25 #include "CallFrame.h"
26 #include "JSGlobalObject.h"
27 #include "JSObject.h"
28 #include "JSCInlines.h"
29 #include <thread>
30
31 namespace JSC {
32
33 std::mutex* GlobalJSLock::s_sharedInstanceMutex;
34
// RAII acquisition of the process-wide shared-instance mutex.
// GlobalJSLock::initialize() must have run first; otherwise
// s_sharedInstanceMutex is null and this dereference crashes.
GlobalJSLock::GlobalJSLock()
{
    s_sharedInstanceMutex->lock();
}
39
// Releases the process-wide mutex acquired by the constructor.
GlobalJSLock::~GlobalJSLock()
{
    s_sharedInstanceMutex->unlock();
}
44
// One-time setup of the shared mutex. The allocation is intentionally never
// freed: the mutex must outlive every GlobalJSLock in the process. Not
// idempotent -- presumably called exactly once from a process-initialization
// path; TODO confirm the caller guarantees single invocation.
void GlobalJSLock::initialize()
{
    s_sharedInstanceMutex = new std::mutex();
}
49
50 JSLockHolder::JSLockHolder(ExecState* exec)
51 : m_vm(&exec->vm())
52 {
53 init();
54 }
55
// Pointer overload; |vm| must be non-null (init() dereferences it).
JSLockHolder::JSLockHolder(VM* vm)
    : m_vm(vm)
{
    init();
}
61
// Reference overload; takes the VM's API lock for this scope.
JSLockHolder::JSLockHolder(VM& vm)
    : m_vm(&vm)
{
    init();
}
67
// Shared constructor tail: acquire the VM's API lock (recursive -- see
// JSLock::lock()).
void JSLockHolder::init()
{
    m_vm->apiLock().lock();
}
72
JSLockHolder::~JSLockHolder()
{
    // Take a protecting reference to the JSLock before clearing m_vm:
    // releasing the lock may allow the VM (which owns the lock) to be
    // destroyed, and the RefPtr keeps the lock object itself alive through
    // the unlock() call.
    RefPtr<JSLock> apiLock(&m_vm->apiLock());
    m_vm = nullptr;
    apiLock->unlock();
}
79
// Constructs an unheld lock bound to |vm|.
JSLock::JSLock(VM* vm)
    : m_ownerThreadID(std::thread::id()) // default id == "no owning thread"
    , m_lockCount(0)                     // recursive acquisition depth
    , m_lockDropDepth(0)                 // nesting depth of DropAllLocks scopes
    , m_hasExclusiveThread(false)        // see setExclusiveThread()
    , m_vm(vm)                           // cleared by willDestroyVM()
    , m_entryAtomicStringTable(nullptr)  // saved table while lock is held
{
}
89
// Out-of-line destructor; nothing to release beyond the members themselves.
JSLock::~JSLock()
{
}
93
// Severs the back-pointer to the VM (presumably invoked while the VM is
// being torn down -- TODO confirm caller). Afterwards didAcquireLock() and
// willReleaseLock() see a null m_vm and skip all VM-dependent bookkeeping.
void JSLock::willDestroyVM(VM* vm)
{
    ASSERT_UNUSED(vm, m_vm == vm);
    m_vm = nullptr;
}
99
// Dedicates the lock to a single thread: when an exclusive thread is set,
// lock()/unlock() skip the underlying mutex entirely and only maintain
// m_lockCount. Passing a default-constructed id clears exclusivity.
// May only be called while the lock is unheld and ownerless (asserted).
void JSLock::setExclusiveThread(std::thread::id threadId)
{
    RELEASE_ASSERT(!m_lockCount && m_ownerThreadID == std::thread::id());
    m_hasExclusiveThread = (threadId != std::thread::id());
    m_ownerThreadID = threadId;
}
106
// Acquire one level of the (recursive) lock.
void JSLock::lock()
{
    lock(1);
}
111
// Acquire |lockCount| levels at once (used by grabAllLocks() to restore a
// previously dropped depth). Recursive: if this thread already holds the
// lock, only the count is bumped.
void JSLock::lock(intptr_t lockCount)
{
    ASSERT(lockCount > 0);
    if (currentThreadIsHoldingLock()) {
        m_lockCount += lockCount;
        return;
    }

    // With an exclusive thread configured there is no mutex to take; the
    // exclusive thread is the only possible owner (see setExclusiveThread()).
    if (!m_hasExclusiveThread) {
        m_lock.lock();
        m_ownerThreadID = std::this_thread::get_id();
    }
    ASSERT(!m_lockCount);
    m_lockCount = lockCount;

    didAcquireLock();
}
129
// Per-acquisition VM bookkeeping: record the entry stack pointer, swap in
// the VM's atomic string table, and register this thread with the GC.
// No-op once willDestroyVM() has cleared m_vm.
void JSLock::didAcquireLock()
{
    // FIXME: What should happen to the per-thread identifier table if we don't have a VM?
    if (!m_vm)
        return;

    RELEASE_ASSERT(!m_vm->stackPointerAtVMEntry());
    void* p = &p; // A proxy for the current stack pointer.
    m_vm->setStackPointerAtVMEntry(p);

    WTFThreadData& threadData = wtfThreadData();
    m_vm->setLastStackTop(threadData.savedLastStackTop());

    // Stash this thread's previous atomic string table so willReleaseLock()
    // can restore it when the lock is fully released.
    ASSERT(!m_entryAtomicStringTable);
    m_entryAtomicStringTable = threadData.setCurrentAtomicStringTable(m_vm->atomicStringTable());
    ASSERT(m_entryAtomicStringTable);

    // Let the GC's conservative scanner know this thread's stack may hold
    // references.
    m_vm->heap.machineThreads().addCurrentThread();
}
149
// Release one level of the (recursive) lock.
void JSLock::unlock()
{
    unlock(1);
}
154
155 void JSLock::unlock(intptr_t unlockCount)
156 {
157 RELEASE_ASSERT(currentThreadIsHoldingLock());
158 ASSERT(m_lockCount >= unlockCount);
159
160 // Maintain m_lockCount while calling willReleaseLock() so that its callees know that
161 // they still have the lock.
162 if (unlockCount == m_lockCount)
163 willReleaseLock();
164
165 m_lockCount -= unlockCount;
166
167 if (!m_lockCount) {
168
169 if (!m_hasExclusiveThread) {
170 m_ownerThreadID = std::thread::id();
171 m_lock.unlock();
172 }
173 }
174 }
175
176 void JSLock::willReleaseLock()
177 {
178 if (m_vm) {
179 m_vm->heap.releaseDelayedReleasedObjects();
180 m_vm->setStackPointerAtVMEntry(nullptr);
181 }
182
183 if (m_entryAtomicStringTable) {
184 wtfThreadData().setCurrentAtomicStringTable(m_entryAtomicStringTable);
185 m_entryAtomicStringTable = nullptr;
186 }
187 }
188
189 void JSLock::lock(ExecState* exec)
190 {
191 exec->vm().apiLock().lock();
192 }
193
194 void JSLock::unlock(ExecState* exec)
195 {
196 exec->vm().apiLock().unlock();
197 }
198
199 bool JSLock::currentThreadIsHoldingLock()
200 {
201 ASSERT(!m_hasExclusiveThread || (exclusiveThread() == std::this_thread::get_id()));
202 if (m_hasExclusiveThread)
203 return !!m_lockCount;
204 return m_ownerThreadID == std::this_thread::get_id();
205 }
206
// This function returns the number of locks that were dropped.
// Temporarily releases the full recursion depth held by this thread so other
// threads may enter the VM; grabAllLocks() restores it. Returns 0 (and does
// nothing) when the lock is pinned to an exclusive thread or when this
// thread does not hold the lock.
unsigned JSLock::dropAllLocks(DropAllLocks* dropper)
{
    if (m_hasExclusiveThread) {
        ASSERT(exclusiveThread() == std::this_thread::get_id());
        return 0;
    }

    if (!currentThreadIsHoldingLock())
        return 0;

    // Record this scope's depth so grabAllLocks() can re-acquire in LIFO
    // order relative to other nested DropAllLocks scopes.
    ++m_lockDropDepth;

    dropper->setDropDepth(m_lockDropDepth);

    // Save the VM's stack bounds into thread-local storage; grabAllLocks()
    // restores them after another thread has used (and overwritten) them.
    WTFThreadData& threadData = wtfThreadData();
    threadData.setSavedStackPointerAtVMEntry(m_vm->stackPointerAtVMEntry());
    threadData.setSavedLastStackTop(m_vm->lastStackTop());

    unsigned droppedLockCount = m_lockCount;
    unlock(droppedLockCount);

    return droppedLockCount;
}
231
// Re-acquires the recursion depth previously returned by dropAllLocks().
void JSLock::grabAllLocks(DropAllLocks* dropper, unsigned droppedLockCount)
{
    ASSERT(!m_hasExclusiveThread || !droppedLockCount);

    // If no locks were dropped, nothing to do!
    if (!droppedLockCount)
        return;

    ASSERT(!currentThreadIsHoldingLock());
    lock(droppedLockCount);

    // Spin until nested DropAllLocks scopes entered after ours have unwound,
    // so drop depths are restored strictly in LIFO order. Yield between
    // attempts to let the other thread make progress.
    while (dropper->dropDepth() != m_lockDropDepth) {
        unlock(droppedLockCount);
        std::this_thread::yield();
        lock(droppedLockCount);
    }

    --m_lockDropDepth;

    // Restore the stack bounds saved by dropAllLocks() -- didAcquireLock()
    // just overwrote them with this (re-)acquisition's values.
    WTFThreadData& threadData = wtfThreadData();
    m_vm->setStackPointerAtVMEntry(threadData.savedStackPointerAtVMEntry());
    m_vm->setLastStackTop(threadData.savedLastStackTop());
}
255
// RAII scope that fully releases the VM's API lock; the destructor
// re-acquires it at the same depth.
JSLock::DropAllLocks::DropAllLocks(VM* vm)
    : m_droppedLockCount(0)
    // If the VM is in the middle of being destroyed then we don't want to resurrect it
    // by allowing DropAllLocks to ref it. By this point the JSLock has already been
    // released anyways, so it doesn't matter that DropAllLocks is a no-op.
    , m_vm(vm->refCount() ? vm : nullptr)
{
    if (!m_vm)
        return;
    // Detach this thread from the VM's atomic string table while the lock is
    // dropped; the destructor reattaches it.
    wtfThreadData().resetCurrentAtomicStringTable();
    // Dropping the lock mid-collection would let another thread enter the VM
    // while the GC is running, so forbid it.
    RELEASE_ASSERT(!m_vm->apiLock().currentThreadIsHoldingLock() || !m_vm->isCollectorBusy());
    m_droppedLockCount = m_vm->apiLock().dropAllLocks(this);
}
269
// Convenience overload; tolerates a null |exec| (becomes a no-op scope).
JSLock::DropAllLocks::DropAllLocks(ExecState* exec)
    : DropAllLocks(exec ? &exec->vm() : nullptr)
{
}
274
// Reference overload; delegates to the pointer constructor.
JSLock::DropAllLocks::DropAllLocks(VM& vm)
    : DropAllLocks(&vm)
{
}
279
// Re-acquires the previously dropped lock depth and reattaches this thread
// to the VM's atomic string table. No-op if the constructor was a no-op.
JSLock::DropAllLocks::~DropAllLocks()
{
    if (!m_vm)
        return;
    m_vm->apiLock().grabAllLocks(this, m_droppedLockCount);
    wtfThreadData().setCurrentAtomicStringTable(m_vm->atomicStringTable());
}
287
288 } // namespace JSC