/*
- * Copyright (C) 2005, 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2005, 2008, 2012, 2014 Apple Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
#include "config.h"
#include "JSLock.h"
-#include "Collector.h"
+#include "Heap.h"
#include "CallFrame.h"
-
-#if ENABLE(JSC_MULTIPLE_THREADS)
-#include <pthread.h>
-#endif
+#include "JSGlobalObject.h"
+#include "JSObject.h"
+#include "JSCInlines.h"
+#include <thread>
namespace JSC {
-#if ENABLE(JSC_MULTIPLE_THREADS)
-
-// Acquire this mutex before accessing lock-related data.
-static pthread_mutex_t JSMutex = PTHREAD_MUTEX_INITIALIZER;
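+// GlobalJSLock serializes access to the shared VM instance. initialize() must run
+// once before the first GlobalJSLock is constructed, because the constructor
+// dereferences s_sharedInstanceMutex unconditionally.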
+std::mutex* GlobalJSLock::s_sharedInstanceMutex;
-// Thread-specific key that tells whether a thread holds the JSMutex, and how many times it was taken recursively.
-pthread_key_t JSLockCount;
-
-static void createJSLockCount()
+GlobalJSLock::GlobalJSLock()
{
- pthread_key_create(&JSLockCount, 0);
+ s_sharedInstanceMutex->lock();
}
-pthread_once_t createJSLockCountOnce = PTHREAD_ONCE_INIT;
-
-// Lock nesting count.
-intptr_t JSLock::lockCount()
+GlobalJSLock::~GlobalJSLock()
{
- pthread_once(&createJSLockCountOnce, createJSLockCount);
-
- return reinterpret_cast<intptr_t>(pthread_getspecific(JSLockCount));
+ s_sharedInstanceMutex->unlock();
}
-static void setLockCount(intptr_t count)
+void GlobalJSLock::initialize()
{
- ASSERT(count >= 0);
- pthread_setspecific(JSLockCount, reinterpret_cast<void*>(count));
+ s_sharedInstanceMutex = new std::mutex();
}
-JSLock::JSLock(ExecState* exec)
- : m_lockBehavior(exec->globalData().isSharedInstance() ? LockForReal : SilenceAssertionsOnly)
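+// JSLockHolder is a scoped (RAII) holder: each constructor resolves a VM and then
+// takes that VM's API lock in init(); the destructor releases it.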
+JSLockHolder::JSLockHolder(ExecState* exec)
+ : m_vm(&exec->vm())
{
- lock(m_lockBehavior);
+ init();
}
-void JSLock::lock(JSLockBehavior lockBehavior)
+JSLockHolder::JSLockHolder(VM* vm)
+ : m_vm(vm)
{
-#ifdef NDEBUG
- // Locking "not for real" is a debug-only feature.
- if (lockBehavior == SilenceAssertionsOnly)
- return;
-#endif
+ init();
+}
- pthread_once(&createJSLockCountOnce, createJSLockCount);
+JSLockHolder::JSLockHolder(VM& vm)
+ : m_vm(&vm)
+{
+ init();
+}
- intptr_t currentLockCount = lockCount();
- if (!currentLockCount && lockBehavior == LockForReal) {
- int result;
- result = pthread_mutex_lock(&JSMutex);
- ASSERT(!result);
- }
- setLockCount(currentLockCount + 1);
+void JSLockHolder::init()
+{
+ m_vm->apiLock().lock();
}
-void JSLock::unlock(JSLockBehavior lockBehavior)
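+// Keep the JSLock alive across the unlock: clearing m_vm may drop the last
+// reference to the VM, so take a ref to the apiLock first and unlock through it.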
+JSLockHolder::~JSLockHolder()
{
- ASSERT(lockCount());
+ RefPtr<JSLock> apiLock(&m_vm->apiLock());
+ m_vm.clear();
+ apiLock->unlock();
+}
-#ifdef NDEBUG
- // Locking "not for real" is a debug-only feature.
- if (lockBehavior == SilenceAssertionsOnly)
- return;
-#endif
-
- intptr_t newLockCount = lockCount() - 1;
- setLockCount(newLockCount);
- if (!newLockCount && lockBehavior == LockForReal) {
- int result;
- result = pthread_mutex_unlock(&JSMutex);
- ASSERT(!result);
- }
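+// JSLock is the per-VM API lock. It is reference-counted independently of the VM
+// (see the RefPtr<JSLock> use in ~JSLockHolder), so it can outlive the VM it guards;
+// willDestroyVM() clears the back-pointer when the VM goes away.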
+JSLock::JSLock(VM* vm)
+ : m_ownerThreadID(std::thread::id())
+ , m_lockCount(0)
+ , m_lockDropDepth(0)
+ , m_hasExclusiveThread(false)
+ , m_vm(vm)
+ , m_entryAtomicStringTable(nullptr)
+{
}
-void JSLock::lock(ExecState* exec)
+JSLock::~JSLock()
{
- lock(exec->globalData().isSharedInstance() ? LockForReal : SilenceAssertionsOnly);
}
-void JSLock::unlock(ExecState* exec)
+void JSLock::willDestroyVM(VM* vm)
{
- unlock(exec->globalData().isSharedInstance() ? LockForReal : SilenceAssertionsOnly);
+ ASSERT_UNUSED(vm, m_vm == vm);
+ m_vm = nullptr;
}
-bool JSLock::currentThreadIsHoldingLock()
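+// When an exclusive thread is set, only that thread may enter the VM, so lock() and
+// unlock() skip the underlying mutex entirely and just maintain m_lockCount.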
+void JSLock::setExclusiveThread(std::thread::id threadId)
{
- pthread_once(&createJSLockCountOnce, createJSLockCount);
- return !!pthread_getspecific(JSLockCount);
-}
-
-// This is fairly nasty. We allow multiple threads to run on the same
-// context, and we do not require any locking semantics in doing so -
-// clients of the API may simply use the context from multiple threads
-// concurently, and assume this will work. In order to make this work,
-// We lock the context when a thread enters, and unlock it when it leaves.
-// However we do not only unlock when the thread returns from its
-// entry point (evaluate script or call function), we also unlock the
-// context if the thread leaves JSC by making a call out to an external
-// function through a callback.
-//
-// All threads using the context share the same JS stack (the RegisterFile).
-// Whenever a thread calls into JSC it starts using the RegisterFile from the
-// previous 'high water mark' - the maximum point the stack has ever grown to
-// (returned by RegisterFile::end()). So if a first thread calls out to a
-// callback, and a second thread enters JSC, then also exits by calling out
-// to a callback, we can be left with stackframes from both threads in the
-// RegisterFile. As such, a problem may occur should the first thread's
-// callback complete first, and attempt to return to JSC. Were we to allow
-// this to happen, and were its stack to grow further, then it may potentially
-// write over the second thread's call frames.
-//
-// In avoid JS stack corruption we enforce a policy of only ever allowing two
-// threads to use a JS context concurrently, and only allowing the second of
-// these threads to execute until it has completed and fully returned from its
-// outermost call into JSC. We enforce this policy using 'lockDropDepth'. The
-// first time a thread exits it will call DropAllLocks - which will do as expected
-// and drop locks allowing another thread to enter. Should another thread, or the
-// same thread again, enter JSC (through evaluate script or call function), and exit
-// again through a callback, then the locks will not be dropped when DropAllLocks
-// is called (since lockDropDepth is non-zero). Since this thread is still holding
-// the locks, only it will re able to re-enter JSC (either be returning from the
-// callback, or by re-entering through another call to evaulate script or call
-// function).
-//
-// This policy is slightly more restricive than it needs to be for correctness -
-// we could validly allow futher entries into JSC from other threads, we only
-// need ensure that callbacks return in the reverse chronological order of the
-// order in which they were made - though implementing the less restrictive policy
-// would likely increase complexity and overhead.
-//
-static unsigned lockDropDepth = 0;
+ RELEASE_ASSERT(!m_lockCount && m_ownerThreadID == std::thread::id());
+ m_hasExclusiveThread = (threadId != std::thread::id());
+ m_ownerThreadID = threadId;
+}
-JSLock::DropAllLocks::DropAllLocks(ExecState* exec)
- : m_lockBehavior(exec->globalData().isSharedInstance() ? LockForReal : SilenceAssertionsOnly)
+void JSLock::lock()
{
- pthread_once(&createJSLockCountOnce, createJSLockCount);
+ lock(1);
+}
- if (lockDropDepth++) {
- m_lockCount = 0;
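+// Acquires the lock (recursively): if this thread already holds it, just bump the
+// count; otherwise take the mutex (unless an exclusive thread is set) and run the
+// per-entry setup in didAcquireLock().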
+void JSLock::lock(intptr_t lockCount)
+{
+ ASSERT(lockCount > 0);
+ if (currentThreadIsHoldingLock()) {
+ m_lockCount += lockCount;
return;
}
- m_lockCount = JSLock::lockCount();
- for (intptr_t i = 0; i < m_lockCount; i++)
- JSLock::unlock(m_lockBehavior);
+ if (!m_hasExclusiveThread) {
+ m_lock.lock();
+ m_ownerThreadID = std::this_thread::get_id();
+ }
+ ASSERT(!m_lockCount);
+ m_lockCount = lockCount;
+
+ didAcquireLock();
}
-JSLock::DropAllLocks::DropAllLocks(JSLockBehavior JSLockBehavior)
- : m_lockBehavior(JSLockBehavior)
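+// Per-thread setup performed each time the lock transitions from unheld to held:
+// record the stack pointer at VM entry, switch to the VM's atomic string table
+// (remembering the thread's previous one), and register this thread with the GC.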
+void JSLock::didAcquireLock()
{
- pthread_once(&createJSLockCountOnce, createJSLockCount);
-
- if (lockDropDepth++) {
- m_lockCount = 0;
+ // FIXME: What should happen to the per-thread identifier table if we don't have a VM?
+ if (!m_vm)
return;
- }
- // It is necessary to drop even "unreal" locks, because having a non-zero lock count
- // will prevent a real lock from being taken.
+ RELEASE_ASSERT(!m_vm->stackPointerAtVMEntry());
+ void* p = &p; // A proxy for the current stack pointer.
+ m_vm->setStackPointerAtVMEntry(p);
+
+ WTFThreadData& threadData = wtfThreadData();
+ m_vm->setLastStackTop(threadData.savedLastStackTop());
- m_lockCount = JSLock::lockCount();
- for (intptr_t i = 0; i < m_lockCount; i++)
- JSLock::unlock(m_lockBehavior);
+ ASSERT(!m_entryAtomicStringTable);
+ m_entryAtomicStringTable = threadData.setCurrentAtomicStringTable(m_vm->atomicStringTable());
+ ASSERT(m_entryAtomicStringTable);
+
+ m_vm->heap.machineThreads().addCurrentThread();
}
-JSLock::DropAllLocks::~DropAllLocks()
+void JSLock::unlock()
{
- for (intptr_t i = 0; i < m_lockCount; i++)
- JSLock::lock(m_lockBehavior);
-
- --lockDropDepth;
+ unlock(1);
}
-#else
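+// Releases unlockCount holds; when the count reaches zero, tear down per-entry state
+// in willReleaseLock() and release the mutex (unless an exclusive thread is set).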
+void JSLock::unlock(intptr_t unlockCount)
+{
+ RELEASE_ASSERT(currentThreadIsHoldingLock());
+ ASSERT(m_lockCount >= unlockCount);
+
+ m_lockCount -= unlockCount;
+
+ if (!m_lockCount) {
+ willReleaseLock();
+
+ if (!m_hasExclusiveThread) {
+ m_ownerThreadID = std::thread::id();
+ m_lock.unlock();
+ }
+ }
+}
-JSLock::JSLock(ExecState*)
- : m_lockBehavior(SilenceAssertionsOnly)
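+// Undo didAcquireLock() just before the lock is released: clear the recorded
+// stack pointer and restore the thread's previous atomic string table.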
+void JSLock::willReleaseLock()
{
+ if (m_vm)
+ m_vm->setStackPointerAtVMEntry(nullptr);
+
+ if (m_entryAtomicStringTable) {
+ wtfThreadData().setCurrentAtomicStringTable(m_entryAtomicStringTable);
+ m_entryAtomicStringTable = nullptr;
+ }
}
-// If threading support is off, set the lock count to a constant value of 1 so ssertions
-// that the lock is held don't fail
-intptr_t JSLock::lockCount()
+void JSLock::lock(ExecState* exec)
{
- return 1;
+ exec->vm().apiLock().lock();
}
-bool JSLock::currentThreadIsHoldingLock()
+void JSLock::unlock(ExecState* exec)
{
- return true;
+ exec->vm().apiLock().unlock();
}
-void JSLock::lock(JSLockBehavior)
+bool JSLock::currentThreadIsHoldingLock()
{
+ ASSERT(!m_hasExclusiveThread || (exclusiveThread() == std::this_thread::get_id()));
+ if (m_hasExclusiveThread)
+ return !!m_lockCount;
+ return m_ownerThreadID == std::this_thread::get_id();
}
-void JSLock::unlock(JSLockBehavior)
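+// dropAllLocks() releases this thread's entire hold on the lock (typically while it
+// calls out of JSC via JSLock::DropAllLocks) so that other threads can enter the VM.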
+// This function returns the number of locks that were dropped.
+unsigned JSLock::dropAllLocks(DropAllLocks* dropper)
{
+ if (m_hasExclusiveThread) {
+ ASSERT(exclusiveThread() == std::this_thread::get_id());
+ return 0;
+ }
+
+ // Check if this thread is currently holding the lock.
+ // FIXME: Maybe we want to require this, guard with an ASSERT?
+ if (!currentThreadIsHoldingLock())
+ return 0;
+
+ ++m_lockDropDepth;
+
+ dropper->setDropDepth(m_lockDropDepth);
+
+ WTFThreadData& threadData = wtfThreadData();
+ threadData.setSavedStackPointerAtVMEntry(m_vm->stackPointerAtVMEntry());
+ threadData.setSavedLastStackTop(m_vm->lastStackTop());
+
+ unsigned droppedLockCount = m_lockCount;
+ unlock(droppedLockCount);
+
+ return droppedLockCount;
}
-void JSLock::lock(ExecState*)
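+// grabAllLocks() re-takes the locks released by a matching dropAllLocks(). If a more
+// recent DropAllLocks is still outstanding (its drop depth differs from ours), back
+// off and retry so that nested drops are unwound in reverse order.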
+void JSLock::grabAllLocks(DropAllLocks* dropper, unsigned droppedLockCount)
{
+ ASSERT(!m_hasExclusiveThread || !droppedLockCount);
+
+ // If no locks were dropped, nothing to do!
+ if (!droppedLockCount)
+ return;
+
+ ASSERT(!currentThreadIsHoldingLock());
+ lock(droppedLockCount);
+
+ while (dropper->dropDepth() != m_lockDropDepth) {
+ unlock(droppedLockCount);
+ std::this_thread::yield();
+ lock(droppedLockCount);
+ }
+
+ --m_lockDropDepth;
+
+ WTFThreadData& threadData = wtfThreadData();
+ m_vm->setStackPointerAtVMEntry(threadData.savedStackPointerAtVMEntry());
+ m_vm->setLastStackTop(threadData.savedLastStackTop());
}
-void JSLock::unlock(ExecState*)
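+// DropAllLocks temporarily relinquishes the current thread's hold on the API lock
+// (for example around a long-running callout), and grabs it back in the destructor.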
+JSLock::DropAllLocks::DropAllLocks(VM* vm)
+ : m_droppedLockCount(0)
+ // If the VM is in the middle of being destroyed then we don't want to resurrect it
+ // by allowing DropAllLocks to ref it. By this point the JSLock has already been
+ // released anyway, so it doesn't matter that DropAllLocks is a no-op.
+ , m_vm(vm && vm->refCount() ? vm : nullptr) // vm may be null when constructed from a null ExecState.
{
+ if (!m_vm)
+ return;
+ wtfThreadData().resetCurrentAtomicStringTable();
+ RELEASE_ASSERT(!m_vm->isCollectorBusy());
+ m_droppedLockCount = m_vm->apiLock().dropAllLocks(this);
}
-JSLock::DropAllLocks::DropAllLocks(ExecState*)
+JSLock::DropAllLocks::DropAllLocks(ExecState* exec)
+ : DropAllLocks(exec ? &exec->vm() : nullptr)
{
}
-JSLock::DropAllLocks::DropAllLocks(JSLockBehavior)
+JSLock::DropAllLocks::DropAllLocks(VM& vm)
+ : DropAllLocks(&vm)
{
}
JSLock::DropAllLocks::~DropAllLocks()
{
+ if (!m_vm)
+ return;
+ m_vm->apiLock().grabAllLocks(this, m_droppedLockCount);
+ wtfThreadData().setCurrentAtomicStringTable(m_vm->atomicStringTable());
}
-#endif // USE(MULTIPLE_THREADS)
-
} // namespace JSC