/*
 * Copyright (C) 2005, 2008, 2012 Apple Inc. All rights reserved.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public License
 * along with this library; see the file COPYING.LIB. If not, write to
 * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA
 */

25 #include "CallFrame.h"
26 #include "JSGlobalObject.h"
28 #include "Operations.h"
Mutex* GlobalJSLock::s_sharedInstanceLock = 0;

GlobalJSLock::GlobalJSLock()
{
    s_sharedInstanceLock->lock();
}

GlobalJSLock::~GlobalJSLock()
{
    s_sharedInstanceLock->unlock();
}

void GlobalJSLock::initialize()
{
    s_sharedInstanceLock = new Mutex();
}

JSLockHolder::JSLockHolder(ExecState* exec)
    : m_vm(&exec->vm())
{
    init();
}

JSLockHolder::JSLockHolder(VM* vm)
    : m_vm(vm)
{
    init();
}

JSLockHolder::JSLockHolder(VM& vm)
    : m_vm(&vm)
{
    init();
}

void JSLockHolder::init()
{
    m_vm->apiLock().lock();
}

JSLockHolder::~JSLockHolder()
{
    // Keep the JSLock alive until after we have released our reference to the VM.
    RefPtr<JSLock> apiLock(&m_vm->apiLock());
    m_vm.clear();
    apiLock->unlock();
}

JSLock::JSLock(VM* vm)
    : m_ownerThread(0)
    , m_lockCount(0)
    , m_lockDropDepth(0)
    , m_vm(vm)
{
    m_spinLock.Init();
}

void JSLock::willDestroyVM(VM* vm)
{
    ASSERT_UNUSED(vm, m_vm == vm);
    m_vm = 0;
}

void JSLock::lock()
{
    ThreadIdentifier currentThread = WTF::currentThread();
    {
        SpinLockHolder holder(&m_spinLock);
        if (m_ownerThread == currentThread && m_lockCount) {
            // This thread already owns the lock; just bump the recursion count.
            m_lockCount++;
            return;
        }
    }

    m_lock.lock();

    {
        SpinLockHolder holder(&m_spinLock);
        m_ownerThread = currentThread;
        ASSERT(!m_lockCount);
        m_lockCount = 1;
    }
}

void JSLock::unlock()
{
    SpinLockHolder holder(&m_spinLock);
    ASSERT(currentThreadIsHoldingLock());

    m_lockCount--;

    if (!m_lockCount) {
        m_ownerThread = 0;
        m_lock.unlock();
    }
}

void JSLock::lock(ExecState* exec)
{
    exec->vm().apiLock().lock();
}

void JSLock::unlock(ExecState* exec)
{
    exec->vm().apiLock().unlock();
}

bool JSLock::currentThreadIsHoldingLock()
{
    return m_lockCount && m_ownerThread == WTF::currentThread();
}

// This is fairly nasty. We allow multiple threads to run on the same
// context, and we do not require any locking semantics in doing so -
// clients of the API may simply use the context from multiple threads
// concurrently, and assume this will work. In order to make this work,
// we lock the context when a thread enters, and unlock it when it leaves.
// However, we do not only unlock when the thread returns from its
// entry point (evaluate script or call function); we also unlock the
// context if the thread leaves JSC by making a call out to an external
// function through a callback.
//
// All threads using the context share the same JS stack (the JSStack).
// Whenever a thread calls into JSC it starts using the JSStack from the
// previous 'high water mark' - the maximum point the stack has ever grown to
// (returned by JSStack::end()). So if a first thread calls out to a
// callback, and a second thread enters JSC, then also exits by calling out
// to a callback, we can be left with stack frames from both threads in the
// JSStack. As such, a problem may occur should the first thread's
// callback complete first, and attempt to return to JSC. Were we to allow
// this to happen, and were its stack to grow further, then it could
// overwrite the second thread's call frames.
//
// To avoid JS stack corruption we enforce a policy of only ever allowing two
// threads to use a JS context concurrently, and only allowing the second of
// these threads to execute until it has completed and fully returned from its
// outermost call into JSC. We enforce this policy using 'lockDropDepth'. The
// first time a thread exits it will call DropAllLocks - which will do as expected
// and drop locks, allowing another thread to enter. Should another thread, or the
// same thread again, enter JSC (through evaluate script or call function), and exit
// again through a callback, then the locks will not be dropped when DropAllLocks
// is called (since lockDropDepth is non-zero). Since this thread is still holding
// the locks, only it will be able to re-enter JSC (either by returning from the
// callback, or by re-entering through another call to evaluate script or call
// function).
//
// This policy is slightly more restrictive than it needs to be for correctness -
// we could validly allow further entries into JSC from other threads; we need
// only ensure that callbacks return in the reverse of the order in which they
// were made - though implementing the less restrictive policy would likely
// increase complexity and overhead.
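//
// Illustrative sketch (hypothetical, not part of this file's implementation):
// a call-out from JSC to an external host function would bracket the call
// with DropAllLocks, so that another thread may enter the context while this
// thread is outside JSC. callOutToHost() and hostFunction are placeholder
// names for such a call site.
//
//     static void callOutToHost(ExecState* exec, void (*hostFunction)())
//     {
//         // Records how many locks this thread held and releases them,
//         // unless a drop is already in flight (lockDropDepth non-zero).
//         JSLock::DropAllLocks dropper(exec);
//         hostFunction();
//     } // ~DropAllLocks() grabs back the recorded number of locks.
//
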
// This function returns the number of locks that were dropped.
unsigned JSLock::dropAllLocks(SpinLock& spinLock)
{
#if PLATFORM(IOS)
    ASSERT_UNUSED(spinLock, spinLock.IsHeld());
    // Check if this thread is currently holding the lock.
    // FIXME: Maybe we want to require this, guard with an ASSERT?
    unsigned lockCount = m_lockCount;
    if (!lockCount || m_ownerThread != WTF::currentThread())
        return 0;

    // Don't drop the locks if they've already been dropped once.
    // (If the prior drop came from another thread, and it resumed first,
    // it could trash our register file).
    if (m_lockDropDepth)
        return 0;

    // m_lockDropDepth is only incremented if any locks were dropped.
    m_lockDropDepth++;
    m_lockCount = 0;
    m_ownerThread = 0;
    m_lock.unlock();
    return lockCount;
#else
    UNUSED_PARAM(spinLock);
    if (m_lockDropDepth++)
        return 0;

    return dropAllLocksUnconditionally(spinLock);
#endif
}

unsigned JSLock::dropAllLocksUnconditionally(SpinLock& spinLock)
{
#if PLATFORM(IOS)
    ASSERT_UNUSED(spinLock, spinLock.IsHeld());
    unsigned lockCount;
    // Check if this thread is currently holding the lock.
    // FIXME: Maybe we want to require this, guard with an ASSERT?
    lockCount = m_lockCount;
    if (!lockCount || m_ownerThread != WTF::currentThread())
        return 0;

    // m_lockDropDepth is only incremented if any locks were dropped.
    m_lockDropDepth++;
    m_lockCount = 0;
    m_ownerThread = 0;
    m_lock.unlock();
    return lockCount;
#else
    UNUSED_PARAM(spinLock);
    unsigned lockCount = m_lockCount;
    for (unsigned i = 0; i < lockCount; i++)
        unlock();

    return lockCount;
#endif
}

void JSLock::grabAllLocks(unsigned lockCount, SpinLock& spinLock)
{
#if PLATFORM(IOS)
    ASSERT(spinLock.IsHeld());
    // If no locks were dropped, nothing to do!
    if (!lockCount)
        return;

    ThreadIdentifier currentThread = WTF::currentThread();
    // Check if this thread is currently holding the lock.
    // FIXME: Maybe we want to prohibit this, guard against with an ASSERT?
    if (m_ownerThread == currentThread && m_lockCount) {
        m_lockCount += lockCount;
        m_lockDropDepth--;
        return;
    }

    // Wait for the underlying lock without holding the spin lock, then retake it.
    spinLock.Unlock();
    m_lock.lock();
    spinLock.Lock();

    m_ownerThread = currentThread;
    ASSERT(!m_lockCount);
    m_lockCount = lockCount;
    m_lockDropDepth--;
#else
    UNUSED_PARAM(spinLock);
    for (unsigned i = 0; i < lockCount; i++)
        lock();

    m_lockDropDepth--;
#endif
}

#if PLATFORM(IOS)
JSLock::DropAllLocks::DropAllLocks(ExecState* exec, AlwaysDropLocksTag alwaysDropLocks)
    : m_lockCount(0)
    , m_vm(&exec->vm())
{
    SpinLock& spinLock = m_vm->apiLock().m_spinLock;
    SpinLockHolder holder(&spinLock);
    if (alwaysDropLocks)
        m_lockCount = m_vm->apiLock().dropAllLocksUnconditionally(spinLock);
    else
        m_lockCount = m_vm->apiLock().dropAllLocks(spinLock);
}

JSLock::DropAllLocks::DropAllLocks(VM* vm, AlwaysDropLocksTag alwaysDropLocks)
    : m_lockCount(0)
    , m_vm(vm)
{
    SpinLock& spinLock = m_vm->apiLock().m_spinLock;
    SpinLockHolder holder(&spinLock);
    if (alwaysDropLocks)
        m_lockCount = m_vm->apiLock().dropAllLocksUnconditionally(spinLock);
    else
        m_lockCount = m_vm->apiLock().dropAllLocks(spinLock);
}

JSLock::DropAllLocks::~DropAllLocks()
{
    SpinLock& spinLock = m_vm->apiLock().m_spinLock;
    SpinLockHolder holder(&spinLock);
    m_vm->apiLock().grabAllLocks(m_lockCount, spinLock);
}

#else
JSLock::DropAllLocks::DropAllLocks(ExecState* exec)
    : m_lockCount(0)
    , m_vm(&exec->vm())
{
    SpinLock& spinLock = m_vm->apiLock().m_spinLock;
    m_lockCount = m_vm->apiLock().dropAllLocks(spinLock);
}

JSLock::DropAllLocks::DropAllLocks(VM* vm)
    : m_lockCount(0)
    , m_vm(vm)
{
    SpinLock& spinLock = m_vm->apiLock().m_spinLock;
    m_lockCount = m_vm->apiLock().dropAllLocks(spinLock);
}

JSLock::DropAllLocks::~DropAllLocks()
{
    SpinLock& spinLock = m_vm->apiLock().m_spinLock;
    m_vm->apiLock().grabAllLocks(m_lockCount, spinLock);
}
#endif

} // namespace JSC