]> git.saurik.com Git - apple/javascriptcore.git/blame - runtime/JSLock.cpp
JavaScriptCore-1218.35.tar.gz
[apple/javascriptcore.git] / runtime / JSLock.cpp
CommitLineData
9dae56ea 1/*
93a37866 2 * Copyright (C) 2005, 2008, 2012 Apple Inc. All rights reserved.
9dae56ea
A
3 *
4 * This library is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Library General Public
6 * License as published by the Free Software Foundation; either
7 * version 2 of the License, or (at your option) any later version.
8 *
9 * This library is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Library General Public License for more details.
13 *
14 * You should have received a copy of the GNU Library General Public License
15 * along with this library; see the file COPYING.LIB. If not, write to
16 * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
17 * Boston, MA 02110-1301, USA
18 *
19 */
20
21#include "config.h"
22#include "JSLock.h"
23
14957cd0 24#include "Heap.h"
9dae56ea 25#include "CallFrame.h"
6fe7ccc8 26#include "JSGlobalObject.h"
14957cd0 27#include "JSObject.h"
93a37866 28#include "Operations.h"
9dae56ea 29
14957cd0 30#if USE(PTHREADS)
9dae56ea
A
31#include <pthread.h>
32#endif
33
34namespace JSC {
35
// Process-wide mutex guarding the shared VM instance. Starts null; it is
// allocated by GlobalJSLock::initialize(), which must run before the first
// GlobalJSLock is constructed.
Mutex* GlobalJSLock::s_sharedInstanceLock = 0;
9dae56ea 37
6fe7ccc8
A
// Scoped holder for the shared-instance lock.
// Precondition: GlobalJSLock::initialize() has already been called;
// otherwise s_sharedInstanceLock is null and this dereferences 0.
GlobalJSLock::GlobalJSLock()
{
    s_sharedInstanceLock->lock();
}
9dae56ea 42
// Releases the shared-instance lock acquired by the constructor.
GlobalJSLock::~GlobalJSLock()
{
    s_sharedInstanceLock->unlock();
}
47
// One-time setup of the shared-instance mutex. Must be called exactly once,
// before any GlobalJSLock is constructed. The Mutex is intentionally leaked:
// it lives for the life of the process.
void GlobalJSLock::initialize()
{
    s_sharedInstanceLock = new Mutex();
}
52
// Scoped holder for a VM's API lock, keyed off an ExecState.
JSLockHolder::JSLockHolder(ExecState* exec)
    : m_vm(&exec->vm())
{
    init();
}
58
// Scoped holder for a VM's API lock (pointer form).
JSLockHolder::JSLockHolder(VM* vm)
    : m_vm(vm)
{
    init();
}
9dae56ea 64
93a37866
A
// Scoped holder for a VM's API lock (reference form).
JSLockHolder::JSLockHolder(VM& vm)
    : m_vm(&vm)
{
    init();
}
9dae56ea 70
// Shared body of all three constructors: take the VM's API lock.
// (JSLock::lock() is recursive, so nested holders on one thread are fine.)
void JSLockHolder::init()
{
    m_vm->apiLock().lock();
}
75
JSLockHolder::~JSLockHolder()
{
    // Keep the JSLock alive across the unlock: clearing m_vm may drop the
    // last reference to the VM, and the VM's death must not destroy the lock
    // while we still need to call unlock() on it. Hence the local RefPtr is
    // taken first, m_vm is cleared, and only then is the lock released.
    RefPtr<JSLock> apiLock(&m_vm->apiLock());
    m_vm.clear();
    apiLock->unlock();
}
82
93a37866
A
// A JSLock guards one VM's API boundary. It is recursive per thread:
// m_lockCount tracks the recursion depth, m_ownerThread the holder, and
// m_lockDropDepth the DropAllLocks nesting (see comments above dropAllLocks).
JSLock::JSLock(VM* vm)
    : m_ownerThread(0)
    , m_lockCount(0)
    , m_lockDropDepth(0)
    , m_vm(vm)
{
    // The spin lock protects m_ownerThread/m_lockCount; it needs explicit init.
    m_spinLock.Init();
}
91
// Nothing to release: m_lock and m_spinLock clean up via their own destructors.
JSLock::~JSLock()
{
}
95
93a37866
A
// Called when the owning VM is going away. The JSLock is refcounted and can
// outlive its VM (see ~JSLockHolder), so we just sever the back-pointer.
void JSLock::willDestroyVM(VM* vm)
{
    ASSERT_UNUSED(vm, m_vm == vm);
    m_vm = 0;
}
101
// Acquire the API lock, recursively. The spin lock is only held for the
// short owner/count checks; it is deliberately NOT held while blocking on
// m_lock, to avoid spinning other threads while we wait.
void JSLock::lock()
{
    ThreadIdentifier currentThread = WTF::currentThread();
    {
        SpinLockHolder holder(&m_spinLock);
        // Fast path: we already own the lock — just bump the recursion count.
        if (m_ownerThread == currentThread && m_lockCount) {
            m_lockCount++;
            return;
        }
    }

    // Slow path: block until the underlying mutex is ours.
    m_lock.lock();

    {
        // Publish ownership under the spin lock so readers of
        // m_ownerThread/m_lockCount see a consistent pair.
        SpinLockHolder holder(&m_spinLock);
        m_ownerThread = currentThread;
        ASSERT(!m_lockCount);
        m_lockCount = 1;
    }
}
122
// Release one level of the recursive API lock; the underlying mutex is only
// released when the outermost hold is dropped.
void JSLock::unlock()
{
    SpinLockHolder holder(&m_spinLock);
    ASSERT(currentThreadIsHoldingLock());

    m_lockCount--;

    // m_ownerThread is intentionally left stale here; it is only ever
    // consulted together with a non-zero m_lockCount.
    if (!m_lockCount)
        m_lock.unlock();
}
133
134void JSLock::lock(ExecState* exec)
135{
93a37866 136 exec->vm().apiLock().lock();
9dae56ea
A
137}
138
139void JSLock::unlock(ExecState* exec)
140{
93a37866 141 exec->vm().apiLock().unlock();
9dae56ea
A
142}
143
144bool JSLock::currentThreadIsHoldingLock()
145{
6fe7ccc8 146 return m_lockCount && m_ownerThread == WTF::currentThread();
9dae56ea
A
147}
148
149// This is fairly nasty. We allow multiple threads to run on the same
150// context, and we do not require any locking semantics in doing so -
151// clients of the API may simply use the context from multiple threads
152// concurrently, and assume this will work. In order to make this work,
153// We lock the context when a thread enters, and unlock it when it leaves.
154// However we do not only unlock when the thread returns from its
155// entry point (evaluate script or call function), we also unlock the
156// context if the thread leaves JSC by making a call out to an external
157// function through a callback.
158//
93a37866
A
159// All threads using the context share the same JS stack (the JSStack).
160// Whenever a thread calls into JSC it starts using the JSStack from the
9dae56ea 161// previous 'high water mark' - the maximum point the stack has ever grown to
93a37866 162// (returned by JSStack::end()). So if a first thread calls out to a
9dae56ea
A
163// callback, and a second thread enters JSC, then also exits by calling out
164// to a callback, we can be left with stackframes from both threads in the
93a37866 165// JSStack. As such, a problem may occur should the first thread's
9dae56ea
A
166// callback complete first, and attempt to return to JSC. Were we to allow
167// this to happen, and were its stack to grow further, then it may potentially
168// write over the second thread's call frames.
169//
6fe7ccc8 170// To avoid JS stack corruption we enforce a policy of only ever allowing two
9dae56ea
A
171// threads to use a JS context concurrently, and only allowing the second of
172// these threads to execute until it has completed and fully returned from its
173// outermost call into JSC. We enforce this policy using 'lockDropDepth'. The
174// first time a thread exits it will call DropAllLocks - which will do as expected
175// and drop locks allowing another thread to enter. Should another thread, or the
176// same thread again, enter JSC (through evaluate script or call function), and exit
177// again through a callback, then the locks will not be dropped when DropAllLocks
178// is called (since lockDropDepth is non-zero). Since this thread is still holding
6fe7ccc8 179// the locks, only it will be able to re-enter JSC (either by returning from the
9dae56ea
A
180// callback, or by re-entering through another call to evaluate script or call
181// function).
182//
183// This policy is slightly more restrictive than it needs to be for correctness -
184// we could validly allow further entries into JSC from other threads, we only
185// need ensure that callbacks return in the reverse chronological order of the
186// order in which they were made - though implementing the less restrictive policy
187// would likely increase complexity and overhead.
188//
9dae56ea 189
// This function returns the number of locks that were dropped.
// Drops every recursive hold this thread has on the API lock — but only the
// first time through (m_lockDropDepth == 0); nested drops are refused so a
// resumed outer thread cannot trash the stack of an inner one (see the
// policy comment above). Returns the number of holds dropped, which the
// caller passes back to grabAllLocks() to restore them.
unsigned JSLock::dropAllLocks(SpinLock& spinLock)
{
#if PLATFORM(IOS)
    // Caller must already hold the spin lock guarding owner/count state.
    ASSERT_UNUSED(spinLock, spinLock.IsHeld());
    // Check if this thread is currently holding the lock.
    // FIXME: Maybe we want to require this, guard with an ASSERT?
    unsigned lockCount = m_lockCount;
    if (!lockCount || m_ownerThread != WTF::currentThread())
        return 0;

    // Don't drop the locks if they've already been dropped once.
    // (If the prior drop came from another thread, and it resumed first,
    // it could trash our register file).
    if (m_lockDropDepth)
        return 0;

    // m_lockDropDepth is only incremented if any locks were dropped.
    m_lockDropDepth++;
    m_lockCount = 0;
    m_lock.unlock();
    return lockCount;
#else
    UNUSED_PARAM(spinLock);
    // Note: on this path the depth is incremented even when we refuse to
    // drop; grabAllLocks() decrements unconditionally, so the pair balances.
    if (m_lockDropDepth++)
        return 0;

    return dropAllLocksUnconditionally(spinLock);
#endif
}
220
93a37866 221unsigned JSLock::dropAllLocksUnconditionally(SpinLock& spinLock)
9dae56ea 222{
93a37866
A
223#if PLATFORM(IOS)
224 ASSERT_UNUSED(spinLock, spinLock.IsHeld());
6fe7ccc8 225 unsigned lockCount;
93a37866
A
226 // Check if this thread is currently holding the lock.
227 // FIXME: Maybe we want to require this, guard with an ASSERT?
228 lockCount = m_lockCount;
229 if (!lockCount || m_ownerThread != WTF::currentThread())
230 return 0;
9dae56ea 231
6fe7ccc8
A
232 // m_lockDropDepth is only incremented if any locks were dropped.
233 m_lockDropDepth++;
234 m_lockCount = 0;
235 m_lock.unlock();
236 return lockCount;
93a37866
A
237#else
238 UNUSED_PARAM(spinLock);
239 unsigned lockCount = m_lockCount;
240 for (unsigned i = 0; i < lockCount; i++)
241 unlock();
242
243 return lockCount;
244#endif
6fe7ccc8
A
245}
246
// Re-acquires the holds previously released by dropAllLocks /
// dropAllLocksUnconditionally. lockCount is the value those functions
// returned; zero means nothing was dropped and there is nothing to do.
void JSLock::grabAllLocks(unsigned lockCount, SpinLock& spinLock)
{
#if PLATFORM(IOS)
    ASSERT(spinLock.IsHeld());
    // If no locks were dropped, nothing to do!
    if (!lockCount)
        return;

    ThreadIdentifier currentThread = WTF::currentThread();
    // Check if this thread is currently holding the lock.
    // FIXME: Maybe we want to prohibit this, guard against with an ASSERT?
    if (m_ownerThread == currentThread && m_lockCount) {
        // Already own it (re-entered while dropped) — just fold the dropped
        // count back in and balance the depth counter.
        m_lockCount += lockCount;
        m_lockDropDepth--;
        return;
    }

    // Must release the spin lock before blocking on m_lock, or another
    // thread spinning on m_spinLock could deadlock against us; retake it
    // afterwards to publish ownership consistently.
    spinLock.Unlock();
    m_lock.lock();
    spinLock.Lock();

    m_ownerThread = currentThread;
    ASSERT(!m_lockCount);
    m_lockCount = lockCount;
    m_lockDropDepth--;
#else
    UNUSED_PARAM(spinLock);
    // Rebuild the recursion by locking once per dropped hold.
    for (unsigned i = 0; i < lockCount; i++)
        lock();

    m_lockDropDepth--;
#endif
}
9dae56ea 280
#if PLATFORM(IOS)
// RAII helper: releases this thread's holds on the VM's API lock for the
// lifetime of the object (e.g. while calling out to client code), and
// restores them on destruction. With AlwaysDropLocksTag set, the drop is
// unconditional (ignores m_lockDropDepth).
JSLock::DropAllLocks::DropAllLocks(ExecState* exec, AlwaysDropLocksTag alwaysDropLocks)
    : m_lockCount(0)
    , m_vm(&exec->vm())
{
    // Both drop variants assert that this spin lock is held on entry.
    SpinLock& spinLock = m_vm->apiLock().m_spinLock;
    SpinLockHolder holder(&spinLock);
    if (alwaysDropLocks)
        m_lockCount = m_vm->apiLock().dropAllLocksUnconditionally(spinLock);
    else
        m_lockCount = m_vm->apiLock().dropAllLocks(spinLock);
}

// Same as above, keyed off a VM directly.
JSLock::DropAllLocks::DropAllLocks(VM* vm, AlwaysDropLocksTag alwaysDropLocks)
    : m_lockCount(0)
    , m_vm(vm)
{
    SpinLock& spinLock = m_vm->apiLock().m_spinLock;
    SpinLockHolder holder(&spinLock);
    if (alwaysDropLocks)
        m_lockCount = m_vm->apiLock().dropAllLocksUnconditionally(spinLock);
    else
        m_lockCount = m_vm->apiLock().dropAllLocks(spinLock);
}

JSLock::DropAllLocks::~DropAllLocks()
{
    // grabAllLocks() temporarily releases this spin lock itself if it must
    // block on the main mutex, so holding it across the call is safe.
    SpinLock& spinLock = m_vm->apiLock().m_spinLock;
    SpinLockHolder holder(&spinLock);
    m_vm->apiLock().grabAllLocks(m_lockCount, spinLock);
}
#else
// Non-iOS variant: no AlwaysDropLocksTag, and the spin lock is not taken here
// (the non-iOS drop/grab paths ignore it via UNUSED_PARAM).
JSLock::DropAllLocks::DropAllLocks(ExecState* exec)
    : m_lockCount(0)
    , m_vm(&exec->vm())
{
    SpinLock& spinLock = m_vm->apiLock().m_spinLock;
    m_lockCount = m_vm->apiLock().dropAllLocks(spinLock);
}

JSLock::DropAllLocks::DropAllLocks(VM* vm)
    : m_lockCount(0)
    , m_vm(vm)
{
    SpinLock& spinLock = m_vm->apiLock().m_spinLock;
    m_lockCount = m_vm->apiLock().dropAllLocks(spinLock);
}

JSLock::DropAllLocks::~DropAllLocks()
{
    SpinLock& spinLock = m_vm->apiLock().m_spinLock;
    m_vm->apiLock().grabAllLocks(m_lockCount, spinLock);
}
#endif
9dae56ea
A
335
336} // namespace JSC