/*
 * Copyright (C) 2005, 2008, 2012, 2014 Apple Inc. All rights reserved.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public License
 * along with this library; see the file COPYING.LIB. If not, write to
 * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA
 */
#include "config.h"
#include "JSLock.h"

#include "CallFrame.h"
#include "JSGlobalObject.h"
#include "JSCInlines.h"
#include <thread>

namespace JSC {
std::mutex* GlobalJSLock::s_sharedInstanceMutex;

GlobalJSLock::GlobalJSLock()
{
    s_sharedInstanceMutex->lock();
}

GlobalJSLock::~GlobalJSLock()
{
    s_sharedInstanceMutex->unlock();
}

void GlobalJSLock::initialize()
{
    s_sharedInstanceMutex = new std::mutex();
}
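// Illustrative only (the caller is an assumption, not part of this file):
// initialize() must run once, before the first GlobalJSLock is constructed, so that
// s_sharedInstanceMutex exists; in JavaScriptCore this is expected to happen during
// process/threading setup. A one-time setup hook might look like:
//
//     static std::once_flag s_globalJSLockOnce; // hypothetical flag name
//     std::call_once(s_globalJSLockOnce, [] { GlobalJSLock::initialize(); });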
JSLockHolder::JSLockHolder(ExecState* exec)
    : m_vm(&exec->vm())
{
    init();
}

JSLockHolder::JSLockHolder(VM* vm)
    : m_vm(vm)
{
    init();
}

JSLockHolder::JSLockHolder(VM& vm)
    : m_vm(&vm)
{
    init();
}

void JSLockHolder::init()
{
    m_vm->apiLock().lock();
}

JSLockHolder::~JSLockHolder()
{
    // Keep the JSLock alive while we drop our VM reference, since releasing
    // that reference may destroy the VM that owns the lock.
    RefPtr<JSLock> apiLock(&m_vm->apiLock());
    m_vm.clear();
    apiLock->unlock();
}
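// Usage sketch (illustrative, not part of this file): JSLockHolder is the RAII way
// to hold the VM's API lock for the duration of a scope:
//
//     void evaluateSomething(JSC::VM& vm) // hypothetical client function
//     {
//         JSC::JSLockHolder locker(vm); // takes vm.apiLock() via init()
//         // ... perform operations that require the VM's API lock ...
//     } // ~JSLockHolder releases the lock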
JSLock::JSLock(VM* vm)
    : m_ownerThreadID(std::thread::id())
    , m_lockCount(0)
    , m_lockDropDepth(0)
    , m_hasExclusiveThread(false)
    , m_vm(vm)
    , m_entryAtomicStringTable(nullptr)
{
}

void JSLock::willDestroyVM(VM* vm)
{
    ASSERT_UNUSED(vm, m_vm == vm);
    m_vm = nullptr;
}
void JSLock::setExclusiveThread(std::thread::id threadId)
{
    RELEASE_ASSERT(!m_lockCount && m_ownerThreadID == std::thread::id());
    m_hasExclusiveThread = (threadId != std::thread::id());
    m_ownerThreadID = threadId;
}
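// Illustrative only (the call site is an assumption, not shown in this file): a VM
// that is confined to a single thread can pin its API lock to that thread, which
// lets lock()/unlock() skip the underlying mutex:
//
//     vm->apiLock().setExclusiveThread(std::this_thread::get_id());
//
// Passing a default-constructed std::thread::id() clears the exclusive thread again.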
void JSLock::lock(intptr_t lockCount)
{
    ASSERT(lockCount > 0);
    if (currentThreadIsHoldingLock()) {
        m_lockCount += lockCount;
        return;
    }

    if (!m_hasExclusiveThread) {
        m_lock.lock();
        m_ownerThreadID = std::this_thread::get_id();
    }
    ASSERT(!m_lockCount);
    m_lockCount = lockCount;

    didAcquireLock();
}
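// Illustrative nesting behavior (example code, not part of this file): the API lock
// is recursive on its owning thread, so nested holders only bump m_lockCount:
//
//     JSC::JSLockHolder outer(vm); // m_lockCount == 1, underlying lock acquired
//     {
//         JSC::JSLockHolder inner(vm); // m_lockCount == 2, no second acquisition
//     } // back to m_lockCount == 1
//     // fully released only when `outer` is destroyed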
void JSLock::didAcquireLock()
{
    // FIXME: What should happen to the per-thread identifier table if we don't have a VM?
    if (!m_vm)
        return;

    RELEASE_ASSERT(!m_vm->stackPointerAtVMEntry());
    void* p = &p; // A proxy for the current stack pointer.
    m_vm->setStackPointerAtVMEntry(p);

    WTFThreadData& threadData = wtfThreadData();
    m_vm->setLastStackTop(threadData.savedLastStackTop());

    ASSERT(!m_entryAtomicStringTable);
    m_entryAtomicStringTable = threadData.setCurrentAtomicStringTable(m_vm->atomicStringTable());
    ASSERT(m_entryAtomicStringTable);

    m_vm->heap.machineThreads().addCurrentThread();
}
void JSLock::unlock()
{
    unlock(1);
}

void JSLock::unlock(intptr_t unlockCount)
{
    RELEASE_ASSERT(currentThreadIsHoldingLock());
    ASSERT(m_lockCount >= unlockCount);

    m_lockCount -= unlockCount;

    if (!m_lockCount) {
        willReleaseLock();

        if (!m_hasExclusiveThread) {
            m_ownerThreadID = std::thread::id();
            m_lock.unlock();
        }
    }
}
void JSLock::willReleaseLock()
{
    if (m_vm)
        m_vm->setStackPointerAtVMEntry(nullptr);

    if (m_entryAtomicStringTable) {
        wtfThreadData().setCurrentAtomicStringTable(m_entryAtomicStringTable);
        m_entryAtomicStringTable = nullptr;
    }
}
void JSLock::lock(ExecState* exec)
{
    exec->vm().apiLock().lock();
}

void JSLock::unlock(ExecState* exec)
{
    exec->vm().apiLock().unlock();
}
bool JSLock::currentThreadIsHoldingLock()
{
    ASSERT(!m_hasExclusiveThread || (exclusiveThread() == std::this_thread::get_id()));
    if (m_hasExclusiveThread)
        return !!m_lockCount;
    return m_ownerThreadID == std::this_thread::get_id();
}
// This function returns the number of locks that were dropped.
unsigned JSLock::dropAllLocks(DropAllLocks* dropper)
{
    if (m_hasExclusiveThread) {
        ASSERT(exclusiveThread() == std::this_thread::get_id());
        return 0;
    }

    // Check if this thread is currently holding the lock.
    // FIXME: Maybe we want to require this, guard with an ASSERT?
    if (!currentThreadIsHoldingLock())
        return 0;

    ++m_lockDropDepth;

    dropper->setDropDepth(m_lockDropDepth);

    WTFThreadData& threadData = wtfThreadData();
    threadData.setSavedStackPointerAtVMEntry(m_vm->stackPointerAtVMEntry());
    threadData.setSavedLastStackTop(m_vm->lastStackTop());

    unsigned droppedLockCount = m_lockCount;
    unlock(droppedLockCount);

    return droppedLockCount;
}
void JSLock::grabAllLocks(DropAllLocks* dropper, unsigned droppedLockCount)
{
    ASSERT(!m_hasExclusiveThread || !droppedLockCount);

    // If no locks were dropped, nothing to do!
    if (!droppedLockCount)
        return;

    ASSERT(!currentThreadIsHoldingLock());
    lock(droppedLockCount);

    while (dropper->dropDepth() != m_lockDropDepth) {
        unlock(droppedLockCount);
        std::this_thread::yield();
        lock(droppedLockCount);
    }

    --m_lockDropDepth;

    WTFThreadData& threadData = wtfThreadData();
    m_vm->setStackPointerAtVMEntry(threadData.savedStackPointerAtVMEntry());
    m_vm->setLastStackTop(threadData.savedLastStackTop());
}
JSLock::DropAllLocks::DropAllLocks(VM* vm)
    : m_droppedLockCount(0)
    // If the VM is in the middle of being destroyed then we don't want to resurrect it
    // by allowing DropAllLocks to ref it. By this point the JSLock has already been
    // released anyways, so it doesn't matter that DropAllLocks is a no-op.
    , m_vm(vm->refCount() ? vm : nullptr)
{
    if (!m_vm)
        return;
    wtfThreadData().resetCurrentAtomicStringTable();
    RELEASE_ASSERT(!m_vm->isCollectorBusy());
    m_droppedLockCount = m_vm->apiLock().dropAllLocks(this);
}

JSLock::DropAllLocks::DropAllLocks(ExecState* exec)
    : DropAllLocks(exec ? &exec->vm() : nullptr)
{
}

JSLock::DropAllLocks::DropAllLocks(VM& vm)
    : DropAllLocks(&vm)
{
}
JSLock::DropAllLocks::~DropAllLocks()
{
    if (!m_vm)
        return;
    m_vm->apiLock().grabAllLocks(this, m_droppedLockCount);
    wtfThreadData().setCurrentAtomicStringTable(m_vm->atomicStringTable());
}

} // namespace JSC
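// Usage sketch (illustrative, not part of this file): DropAllLocks temporarily
// releases the API lock around a blocking operation so other threads can enter the
// VM, then reacquires the same lock count when it goes out of scope:
//
//     void blockOnExternalWork(JSC::ExecState* exec) // hypothetical client function
//     {
//         JSC::JSLock::DropAllLocks dropper(exec); // unlocks, remembering the count
//         // ... wait or call out without holding the VM's API lock ...
//     } // ~DropAllLocks re-enters via grabAllLocks()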