/*
- * Copyright (C) 2005, 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2005, 2008, 2012, 2014 Apple Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
#include "CallFrame.h"
#include "JSGlobalObject.h"
#include "JSObject.h"
-#include "ScopeChain.h"
-
-#if USE(PTHREADS)
-#include <pthread.h>
-#endif
+#include "JSCInlines.h"
+#include <thread>
namespace JSC {
-// JSLock is only needed to support an obsolete execution model where JavaScriptCore
-// automatically protected against concurrent access from multiple threads.
-// So it's safe to disable it on non-mac platforms where we don't have native pthreads.
-#if (OS(DARWIN) || USE(PTHREADS))
-
-static pthread_mutex_t sharedInstanceLock = PTHREAD_MUTEX_INITIALIZER;
+std::mutex* GlobalJSLock::s_sharedInstanceMutex;
GlobalJSLock::GlobalJSLock()
{
- pthread_mutex_lock(&sharedInstanceLock);
+ s_sharedInstanceMutex->lock();
}
GlobalJSLock::~GlobalJSLock()
{
- pthread_mutex_unlock(&sharedInstanceLock);
+ s_sharedInstanceMutex->unlock();
}
-JSLockHolder::JSLockHolder(ExecState* exec)
- : m_globalData(&exec->globalData())
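+// Must be called once, before the first GlobalJSLock is constructed, so that the shared mutex exists.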
+void GlobalJSLock::initialize()
{
- m_globalData->apiLock().lock();
+ s_sharedInstanceMutex = new std::mutex();
}
-JSLockHolder::JSLockHolder(JSGlobalData* globalData)
- : m_globalData(globalData)
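+// JSLockHolder is a RAII helper: every constructor funnels into init(), which acquires the VM's API lock for the lifetime of the holder.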
+JSLockHolder::JSLockHolder(ExecState* exec)
+ : m_vm(&exec->vm())
{
- m_globalData->apiLock().lock();
+ init();
}
-JSLockHolder::JSLockHolder(JSGlobalData& globalData)
- : m_globalData(&globalData)
+JSLockHolder::JSLockHolder(VM* vm)
+ : m_vm(vm)
{
- m_globalData->apiLock().lock();
+ init();
}
-JSLockHolder::~JSLockHolder()
+JSLockHolder::JSLockHolder(VM& vm)
+ : m_vm(&vm)
{
- m_globalData->apiLock().unlock();
+ init();
}
-JSLock::JSLock()
- : m_lockCount(0)
+void JSLockHolder::init()
{
- m_spinLock.Init();
+ m_vm->apiLock().lock();
}
-JSLock::~JSLock()
+JSLockHolder::~JSLockHolder()
{
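+ // Keep the JSLock alive while unlocking: clearing m_vm below may drop the last reference to the VM, which owns the lock.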
+ RefPtr<JSLock> apiLock(&m_vm->apiLock());
+ m_vm.clear();
+ apiLock->unlock();
}
-void JSLock::lock()
+JSLock::JSLock(VM* vm)
+ : m_ownerThreadID(std::thread::id())
+ , m_lockCount(0)
+ , m_lockDropDepth(0)
+ , m_hasExclusiveThread(false)
+ , m_vm(vm)
+ , m_entryAtomicStringTable(nullptr)
{
- ThreadIdentifier currentThread = WTF::currentThread();
- {
- SpinLockHolder holder(&m_spinLock);
- if (m_ownerThread == currentThread && m_lockCount) {
- m_lockCount++;
- return;
- }
- }
-
- m_lock.lock();
-
- {
- SpinLockHolder holder(&m_spinLock);
- m_ownerThread = currentThread;
- ASSERT(!m_lockCount);
- m_lockCount = 1;
- }
}
-void JSLock::unlock()
+JSLock::~JSLock()
{
- ASSERT(currentThreadIsHoldingLock());
-
- SpinLockHolder holder(&m_spinLock);
- m_lockCount--;
-
- if (!m_lockCount)
- m_lock.unlock();
}
-void JSLock::lock(ExecState* exec)
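+// Called when the VM is about to go away; clearing m_vm keeps a still-referenced JSLock from touching a dead VM afterwards.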
+void JSLock::willDestroyVM(VM* vm)
{
- exec->globalData().apiLock().lock();
+ ASSERT_UNUSED(vm, m_vm == vm);
+ m_vm = nullptr;
}
-void JSLock::unlock(ExecState* exec)
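+// When an exclusive thread is set, only that thread may use the VM, so lock()/unlock() just
+// maintain m_lockCount and never touch m_lock. A default-constructed std::thread::id() clears it.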
+void JSLock::setExclusiveThread(std::thread::id threadId)
{
- exec->globalData().apiLock().unlock();
+ RELEASE_ASSERT(!m_lockCount && m_ownerThreadID == std::thread::id());
+ m_hasExclusiveThread = (threadId != std::thread::id());
+ m_ownerThreadID = threadId;
}
-bool JSLock::currentThreadIsHoldingLock()
+void JSLock::lock()
{
- return m_lockCount && m_ownerThread == WTF::currentThread();
+ lock(1);
}
-// This is fairly nasty. We allow multiple threads to run on the same
-// context, and we do not require any locking semantics in doing so -
-// clients of the API may simply use the context from multiple threads
-// concurently, and assume this will work. In order to make this work,
-// We lock the context when a thread enters, and unlock it when it leaves.
-// However we do not only unlock when the thread returns from its
-// entry point (evaluate script or call function), we also unlock the
-// context if the thread leaves JSC by making a call out to an external
-// function through a callback.
-//
-// All threads using the context share the same JS stack (the RegisterFile).
-// Whenever a thread calls into JSC it starts using the RegisterFile from the
-// previous 'high water mark' - the maximum point the stack has ever grown to
-// (returned by RegisterFile::end()). So if a first thread calls out to a
-// callback, and a second thread enters JSC, then also exits by calling out
-// to a callback, we can be left with stackframes from both threads in the
-// RegisterFile. As such, a problem may occur should the first thread's
-// callback complete first, and attempt to return to JSC. Were we to allow
-// this to happen, and were its stack to grow further, then it may potentially
-// write over the second thread's call frames.
-//
-// To avoid JS stack corruption we enforce a policy of only ever allowing two
-// threads to use a JS context concurrently, and only allowing the second of
-// these threads to execute until it has completed and fully returned from its
-// outermost call into JSC. We enforce this policy using 'lockDropDepth'. The
-// first time a thread exits it will call DropAllLocks - which will do as expected
-// and drop locks allowing another thread to enter. Should another thread, or the
-// same thread again, enter JSC (through evaluate script or call function), and exit
-// again through a callback, then the locks will not be dropped when DropAllLocks
-// is called (since lockDropDepth is non-zero). Since this thread is still holding
-// the locks, only it will be able to re-enter JSC (either be returning from the
-// callback, or by re-entering through another call to evaulate script or call
-// function).
-//
-// This policy is slightly more restricive than it needs to be for correctness -
-// we could validly allow futher entries into JSC from other threads, we only
-// need ensure that callbacks return in the reverse chronological order of the
-// order in which they were made - though implementing the less restrictive policy
-// would likely increase complexity and overhead.
-//
-
-// This function returns the number of locks that were dropped.
-unsigned JSLock::dropAllLocks()
+void JSLock::lock(intptr_t lockCount)
{
- unsigned lockCount;
- {
- // Check if this thread is currently holding the lock.
- // FIXME: Maybe we want to require this, guard with an ASSERT?
- SpinLockHolder holder(&m_spinLock);
- lockCount = m_lockCount;
- if (!lockCount || m_ownerThread != WTF::currentThread())
- return 0;
+ ASSERT(lockCount > 0);
+ if (currentThreadIsHoldingLock()) {
+ m_lockCount += lockCount;
+ return;
}
- // Don't drop the locks if they've already been dropped once.
- // (If the prior drop came from another thread, and it resumed first,
- // it could trash our register file).
- if (m_lockDropDepth)
- return 0;
-
- // m_lockDropDepth is only incremented if any locks were dropped.
- m_lockDropDepth++;
- m_lockCount = 0;
- m_lock.unlock();
- return lockCount;
-}
-
-unsigned JSLock::dropAllLocksUnconditionally()
-{
- unsigned lockCount;
- {
- // Check if this thread is currently holding the lock.
- // FIXME: Maybe we want to require this, guard with an ASSERT?
- SpinLockHolder holder(&m_spinLock);
- lockCount = m_lockCount;
- if (!lockCount || m_ownerThread != WTF::currentThread())
- return 0;
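+ // An exclusive-thread lock is just a counter; otherwise take the underlying mutex and record the owner.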
+ if (!m_hasExclusiveThread) {
+ m_lock.lock();
+ m_ownerThreadID = std::this_thread::get_id();
}
+ ASSERT(!m_lockCount);
+ m_lockCount = lockCount;
- // m_lockDropDepth is only incremented if any locks were dropped.
- m_lockDropDepth++;
- m_lockCount = 0;
- m_lock.unlock();
- return lockCount;
+ didAcquireLock();
}
-void JSLock::grabAllLocks(unsigned lockCount)
+void JSLock::didAcquireLock()
{
- // If no locks were dropped, nothing to do!
- if (!lockCount)
+ // FIXME: What should happen to the per-thread identifier table if we don't have a VM?
+ if (!m_vm)
return;
- ThreadIdentifier currentThread = WTF::currentThread();
- {
- // Check if this thread is currently holding the lock.
- // FIXME: Maybe we want to prohibit this, guard against with an ASSERT?
- SpinLockHolder holder(&m_spinLock);
- if (m_ownerThread == currentThread && m_lockCount) {
- m_lockCount += lockCount;
- m_lockDropDepth--;
- return;
- }
- }
+ RELEASE_ASSERT(!m_vm->stackPointerAtVMEntry());
+ void* p = &p; // A proxy for the current stack pointer.
+ m_vm->setStackPointerAtVMEntry(p);
- m_lock.lock();
+ WTFThreadData& threadData = wtfThreadData();
+ m_vm->setLastStackTop(threadData.savedLastStackTop());
- {
- SpinLockHolder holder(&m_spinLock);
- m_ownerThread = currentThread;
- ASSERT(!m_lockCount);
- m_lockCount = lockCount;
- m_lockDropDepth--;
- }
-}
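+ // Install the VM's atomic string table for this thread, remembering the previous one so willReleaseLock() can restore it.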
+ ASSERT(!m_entryAtomicStringTable);
+ m_entryAtomicStringTable = threadData.setCurrentAtomicStringTable(m_vm->atomicStringTable());
+ ASSERT(m_entryAtomicStringTable);
-JSLock::DropAllLocks::DropAllLocks(ExecState* exec, AlwaysDropLocksTag alwaysDropLocks)
- : m_lockCount(0)
- , m_globalData(&exec->globalData())
-{
- if (alwaysDropLocks)
- m_lockCount = m_globalData->apiLock().dropAllLocksUnconditionally();
- else
- m_lockCount = m_globalData->apiLock().dropAllLocks();
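+ // Register this thread with the GC so its stack is scanned conservatively.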
+ m_vm->heap.machineThreads().addCurrentThread();
}
-JSLock::DropAllLocks::DropAllLocks(JSGlobalData* globalData, AlwaysDropLocksTag alwaysDropLocks)
- : m_lockCount(0)
- , m_globalData(globalData)
+void JSLock::unlock()
{
- if (alwaysDropLocks)
- m_lockCount = m_globalData->apiLock().dropAllLocksUnconditionally();
- else
- m_lockCount = m_globalData->apiLock().dropAllLocks();
+ unlock(1);
}
-JSLock::DropAllLocks::~DropAllLocks()
+void JSLock::unlock(intptr_t unlockCount)
{
- m_globalData->apiLock().grabAllLocks(m_lockCount);
-}
+ RELEASE_ASSERT(currentThreadIsHoldingLock());
+ ASSERT(m_lockCount >= unlockCount);
-#else // (OS(DARWIN) || USE(PTHREADS))
+ m_lockCount -= unlockCount;
-GlobalJSLock::GlobalJSLock()
-{
-}
+ if (!m_lockCount) {
+ willReleaseLock();
-GlobalJSLock::~GlobalJSLock()
-{
+ if (!m_hasExclusiveThread) {
+ m_ownerThreadID = std::thread::id();
+ m_lock.unlock();
+ }
+ }
}
-JSLockHolder::JSLockHolder(JSGlobalData*)
+void JSLock::willReleaseLock()
{
-}
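+ // Tear down the per-thread state that didAcquireLock() installed.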
+ if (m_vm)
+ m_vm->setStackPointerAtVMEntry(nullptr);
-JSLockHolder::JSLockHolder(JSGlobalData&)
-{
+ if (m_entryAtomicStringTable) {
+ wtfThreadData().setCurrentAtomicStringTable(m_entryAtomicStringTable);
+ m_entryAtomicStringTable = nullptr;
+ }
}
-JSLockHolder::JSLockHolder(ExecState*)
+void JSLock::lock(ExecState* exec)
{
+ exec->vm().apiLock().lock();
}
-JSLockHolder::~JSLockHolder()
+void JSLock::unlock(ExecState* exec)
{
+ exec->vm().apiLock().unlock();
}
-JSLock::JSLock()
+bool JSLock::currentThreadIsHoldingLock()
{
+ ASSERT(!m_hasExclusiveThread || (exclusiveThread() == std::this_thread::get_id()));
+ if (m_hasExclusiveThread)
+ return !!m_lockCount;
+ return m_ownerThreadID == std::this_thread::get_id();
}
-JSLock::~JSLock()
+// This function returns the number of locks that were dropped.
+unsigned JSLock::dropAllLocks(DropAllLocks* dropper)
{
-}
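+ // A VM with an exclusive thread never hands its lock to another thread, so there is nothing to drop.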
+ if (m_hasExclusiveThread) {
+ ASSERT(exclusiveThread() == std::this_thread::get_id());
+ return 0;
+ }
-bool JSLock::currentThreadIsHoldingLock()
-{
- return true;
-}
+ // Check if this thread is currently holding the lock.
+ // FIXME: Maybe we want to require this, guard with an ASSERT?
+ if (!currentThreadIsHoldingLock())
+ return 0;
-void JSLock::lock()
-{
-}
+ ++m_lockDropDepth;
-void JSLock::unlock()
-{
-}
+ dropper->setDropDepth(m_lockDropDepth);
-void JSLock::lock(ExecState*)
-{
-}
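+ // Stash the VM-entry stack bounds in the current thread so grabAllLocks() can reinstall them when the lock is retaken.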
+ WTFThreadData& threadData = wtfThreadData();
+ threadData.setSavedStackPointerAtVMEntry(m_vm->stackPointerAtVMEntry());
+ threadData.setSavedLastStackTop(m_vm->lastStackTop());
-void JSLock::unlock(ExecState*)
-{
-}
+ unsigned droppedLockCount = m_lockCount;
+ unlock(droppedLockCount);
-void JSLock::lock(JSGlobalData&)
-{
+ return droppedLockCount;
}
-void JSLock::unlock(JSGlobalData&)
+void JSLock::grabAllLocks(DropAllLocks* dropper, unsigned droppedLockCount)
{
-}
+ ASSERT(!m_hasExclusiveThread || !droppedLockCount);
-unsigned JSLock::dropAllLocks()
-{
- return 0;
-}
+ // If no locks were dropped, nothing to do!
+ if (!droppedLockCount)
+ return;
-unsigned JSLock::dropAllLocksUnconditionally()
-{
- return 0;
+ ASSERT(!currentThreadIsHoldingLock());
+ lock(droppedLockCount);
+
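+ // Threads that dropped the lock after us must re-grab it first: spin (unlock, yield, relock)
+ // until the drop depths match, so that drops are undone in LIFO order.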
+ while (dropper->dropDepth() != m_lockDropDepth) {
+ unlock(droppedLockCount);
+ std::this_thread::yield();
+ lock(droppedLockCount);
+ }
+
+ --m_lockDropDepth;
+
+ WTFThreadData& threadData = wtfThreadData();
+ m_vm->setStackPointerAtVMEntry(threadData.savedStackPointerAtVMEntry());
+ m_vm->setLastStackTop(threadData.savedLastStackTop());
}
-void JSLock::grabAllLocks(unsigned)
+JSLock::DropAllLocks::DropAllLocks(VM* vm)
+ : m_droppedLockCount(0)
+ // If the VM is in the middle of being destroyed then we don't want to resurrect it
+ // by allowing DropAllLocks to ref it. By this point the JSLock has already been
+ // released anyway, so it doesn't matter that DropAllLocks is a no-op.
+ , m_vm(vm->refCount() ? vm : nullptr)
{
+ if (!m_vm)
+ return;
+ wtfThreadData().resetCurrentAtomicStringTable();
+ RELEASE_ASSERT(!m_vm->isCollectorBusy());
+ m_droppedLockCount = m_vm->apiLock().dropAllLocks(this);
}
-JSLock::DropAllLocks::DropAllLocks(ExecState*)
+JSLock::DropAllLocks::DropAllLocks(ExecState* exec)
+ : DropAllLocks(exec ? &exec->vm() : nullptr)
{
}
-JSLock::DropAllLocks::DropAllLocks(JSGlobalData*)
+JSLock::DropAllLocks::DropAllLocks(VM& vm)
+ : DropAllLocks(&vm)
{
}
JSLock::DropAllLocks::~DropAllLocks()
{
+ if (!m_vm)
+ return;
+ m_vm->apiLock().grabAllLocks(this, m_droppedLockCount);
+ wtfThreadData().setCurrentAtomicStringTable(m_vm->atomicStringTable());
}
-#endif // (OS(DARWIN) || USE(PTHREADS))
-
} // namespace JSC