/*
 * Copyright (C) 2005, 2008, 2012 Apple Inc. All rights reserved.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public License
 * along with this library; see the file COPYING.LIB. If not, write to
 * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA
 *
 */

#include "config.h"
#include "JSLock.h"

#include "Heap.h"
#include "CallFrame.h"
#include "JSGlobalObject.h"
#include "JSObject.h"
#include "Operations.h"

#if USE(PTHREADS)
#include <pthread.h>
#endif

namespace JSC {

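// GlobalJSLock is a scoped holder of the process-wide s_sharedInstanceLock.
// initialize() must be called once before the first GlobalJSLock is constructed,
// since the constructor dereferences s_sharedInstanceLock; the Mutex allocated
// there is never destroyed.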
Mutex* GlobalJSLock::s_sharedInstanceLock = 0;

GlobalJSLock::GlobalJSLock()
{
    s_sharedInstanceLock->lock();
}

GlobalJSLock::~GlobalJSLock()
{
    s_sharedInstanceLock->unlock();
}

void GlobalJSLock::initialize()
{
    s_sharedInstanceLock = new Mutex();
}

JSLockHolder::JSLockHolder(ExecState* exec)
    : m_vm(&exec->vm())
{
    init();
}

JSLockHolder::JSLockHolder(VM* vm)
    : m_vm(vm)
{
    init();
}

JSLockHolder::JSLockHolder(VM& vm)
    : m_vm(&vm)
{
    init();
}

void JSLockHolder::init()
{
    m_vm->apiLock().lock();
}

JSLockHolder::~JSLockHolder()
{
    RefPtr<JSLock> apiLock(&m_vm->apiLock());
    m_vm.clear();
    apiLock->unlock();
}

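// JSLockHolder is the RAII guard taken at API entry points. A minimal usage
// sketch, with a hypothetical entry point that is not part of JavaScriptCore:
//
//     void someApiEntryPoint(ExecState* exec)
//     {
//         JSLockHolder locker(exec); // takes exec->vm().apiLock() for this scope
//         // safe to allocate from the heap and execute JS here
//     } // the lock is released when 'locker' goes out of scope
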
JSLock::JSLock(VM* vm)
    : m_ownerThread(0)
    , m_lockCount(0)
    , m_lockDropDepth(0)
    , m_vm(vm)
{
    m_spinLock.Init();
}

JSLock::~JSLock()
{
}

void JSLock::willDestroyVM(VM* vm)
{
    ASSERT_UNUSED(vm, m_vm == vm);
    m_vm = 0;
}

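// lock() and unlock() implement a per-VM recursive lock. m_ownerThread and
// m_lockCount are guarded by m_spinLock, while m_lock is the mutex that is
// actually contended: a thread that already owns the lock just bumps the count,
// and the mutex is released only once the count drops back to zero.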
void JSLock::lock()
{
    ThreadIdentifier currentThread = WTF::currentThread();
    {
        SpinLockHolder holder(&m_spinLock);
        if (m_ownerThread == currentThread && m_lockCount) {
            m_lockCount++;
            return;
        }
    }

    m_lock.lock();

    {
        SpinLockHolder holder(&m_spinLock);
        m_ownerThread = currentThread;
        ASSERT(!m_lockCount);
        m_lockCount = 1;
    }
}

void JSLock::unlock()
{
    SpinLockHolder holder(&m_spinLock);
    ASSERT(currentThreadIsHoldingLock());

    m_lockCount--;

    if (!m_lockCount)
        m_lock.unlock();
}

void JSLock::lock(ExecState* exec)
{
    exec->vm().apiLock().lock();
}

void JSLock::unlock(ExecState* exec)
{
    exec->vm().apiLock().unlock();
}

bool JSLock::currentThreadIsHoldingLock()
{
    return m_lockCount && m_ownerThread == WTF::currentThread();
}

// This is fairly nasty. We allow multiple threads to run on the same
// context, and we do not require any locking semantics in doing so -
// clients of the API may simply use the context from multiple threads
// concurrently, and assume this will work. In order to make this work,
// we lock the context when a thread enters, and unlock it when it leaves.
// However, we do not only unlock when the thread returns from its
// entry point (evaluate script or call function), we also unlock the
// context if the thread leaves JSC by making a call out to an external
// function through a callback.
//
// All threads using the context share the same JS stack (the JSStack).
// Whenever a thread calls into JSC it starts using the JSStack from the
// previous 'high water mark' - the maximum point the stack has ever grown to
// (returned by JSStack::end()). So if a first thread calls out to a
// callback, and a second thread enters JSC, then also exits by calling out
// to a callback, we can be left with stack frames from both threads in the
// JSStack. As such, a problem may occur should the first thread's
// callback complete first, and attempt to return to JSC. Were we to allow
// this to happen, and were its stack to grow further, then it may potentially
// write over the second thread's call frames.
//
// To avoid JS stack corruption we enforce a policy of only ever allowing two
// threads to use a JS context concurrently, and only allowing the second of
// these threads to execute until it has completed and fully returned from its
// outermost call into JSC. We enforce this policy using 'lockDropDepth'. The
// first time a thread exits it will call DropAllLocks - which will do as expected
// and drop locks, allowing another thread to enter. Should another thread, or the
// same thread again, enter JSC (through evaluate script or call function), and exit
// again through a callback, then the locks will not be dropped when DropAllLocks
// is called (since lockDropDepth is non-zero). Since this thread is still holding
// the locks, only it will be able to re-enter JSC (either by returning from the
// callback, or by re-entering through another call to evaluate script or call
// function).
//
// This policy is slightly more restrictive than it needs to be for correctness -
// we could validly allow further entries into JSC from other threads; we need
// only ensure that callbacks return in the reverse of the order in which they
// were made - though implementing the less restrictive policy would likely
// increase complexity and overhead.
//

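// A minimal usage sketch of the mechanism above; the callback call below is
// hypothetical and not code from this file. A thread about to call out of JSC
// wraps the callout in JSLock::DropAllLocks so that other threads may enter
// while it is away:
//
//     {
//         JSLock::DropAllLocks dropper(exec); // drops this thread's locks, if permitted
//         callClientCallback(context);        // may re-enter JSC from any thread
//     } // ~DropAllLocks() re-grabs the same number of locks before returning into JSC
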
// This function returns the number of locks that were dropped.
unsigned JSLock::dropAllLocks(SpinLock& spinLock)
{
#if PLATFORM(IOS)
    ASSERT_UNUSED(spinLock, spinLock.IsHeld());
    // Check if this thread is currently holding the lock.
    // FIXME: Maybe we want to require this, guard with an ASSERT?
    unsigned lockCount = m_lockCount;
    if (!lockCount || m_ownerThread != WTF::currentThread())
        return 0;

    // Don't drop the locks if they've already been dropped once.
    // (If the prior drop came from another thread, and it resumed first,
    // it could trash our register file).
    if (m_lockDropDepth)
        return 0;

    // m_lockDropDepth is only incremented if any locks were dropped.
    m_lockDropDepth++;
    m_lockCount = 0;
    m_lock.unlock();
    return lockCount;
#else
    UNUSED_PARAM(spinLock);
    if (m_lockDropDepth++)
        return 0;

    return dropAllLocksUnconditionally(spinLock);
#endif
}

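// Unlike dropAllLocks(), dropAllLocksUnconditionally() does not consult
// m_lockDropDepth before releasing this thread's locks; it too returns the
// number of locks dropped so that grabAllLocks() can restore them.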
unsigned JSLock::dropAllLocksUnconditionally(SpinLock& spinLock)
{
#if PLATFORM(IOS)
    ASSERT_UNUSED(spinLock, spinLock.IsHeld());
    unsigned lockCount;
    // Check if this thread is currently holding the lock.
    // FIXME: Maybe we want to require this, guard with an ASSERT?
    lockCount = m_lockCount;
    if (!lockCount || m_ownerThread != WTF::currentThread())
        return 0;

    // m_lockDropDepth is only incremented if any locks were dropped.
    m_lockDropDepth++;
    m_lockCount = 0;
    m_lock.unlock();
    return lockCount;
#else
    UNUSED_PARAM(spinLock);
    unsigned lockCount = m_lockCount;
    for (unsigned i = 0; i < lockCount; i++)
        unlock();

    return lockCount;
#endif
}

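// grabAllLocks() is the counterpart to the two drop functions above: when any
// locks were dropped, it retakes the underlying mutex if this thread no longer
// holds it, restores the saved lock count, and decrements m_lockDropDepth.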
void JSLock::grabAllLocks(unsigned lockCount, SpinLock& spinLock)
{
#if PLATFORM(IOS)
    ASSERT(spinLock.IsHeld());
    // If no locks were dropped, nothing to do!
    if (!lockCount)
        return;

    ThreadIdentifier currentThread = WTF::currentThread();
    // Check if this thread is currently holding the lock.
    // FIXME: Maybe we want to prohibit this, guard against with an ASSERT?
    if (m_ownerThread == currentThread && m_lockCount) {
        m_lockCount += lockCount;
        m_lockDropDepth--;
        return;
    }

    spinLock.Unlock();
    m_lock.lock();
    spinLock.Lock();

    m_ownerThread = currentThread;
    ASSERT(!m_lockCount);
    m_lockCount = lockCount;
    m_lockDropDepth--;
#else
    UNUSED_PARAM(spinLock);
    for (unsigned i = 0; i < lockCount; i++)
        lock();

    m_lockDropDepth--;
#endif
}

#if PLATFORM(IOS)
JSLock::DropAllLocks::DropAllLocks(ExecState* exec, AlwaysDropLocksTag alwaysDropLocks)
    : m_lockCount(0)
    , m_vm(&exec->vm())
{
    SpinLock& spinLock = m_vm->apiLock().m_spinLock;
    SpinLockHolder holder(&spinLock);
    if (alwaysDropLocks)
        m_lockCount = m_vm->apiLock().dropAllLocksUnconditionally(spinLock);
    else
        m_lockCount = m_vm->apiLock().dropAllLocks(spinLock);
}

JSLock::DropAllLocks::DropAllLocks(VM* vm, AlwaysDropLocksTag alwaysDropLocks)
    : m_lockCount(0)
    , m_vm(vm)
{
    SpinLock& spinLock = m_vm->apiLock().m_spinLock;
    SpinLockHolder holder(&spinLock);
    if (alwaysDropLocks)
        m_lockCount = m_vm->apiLock().dropAllLocksUnconditionally(spinLock);
    else
        m_lockCount = m_vm->apiLock().dropAllLocks(spinLock);
}

JSLock::DropAllLocks::~DropAllLocks()
{
    SpinLock& spinLock = m_vm->apiLock().m_spinLock;
    SpinLockHolder holder(&spinLock);
    m_vm->apiLock().grabAllLocks(m_lockCount, spinLock);
}
#else
JSLock::DropAllLocks::DropAllLocks(ExecState* exec)
    : m_lockCount(0)
    , m_vm(&exec->vm())
{
    SpinLock& spinLock = m_vm->apiLock().m_spinLock;
    m_lockCount = m_vm->apiLock().dropAllLocks(spinLock);
}

JSLock::DropAllLocks::DropAllLocks(VM* vm)
    : m_lockCount(0)
    , m_vm(vm)
{
    SpinLock& spinLock = m_vm->apiLock().m_spinLock;
    m_lockCount = m_vm->apiLock().dropAllLocks(spinLock);
}

JSLock::DropAllLocks::~DropAllLocks()
{
    SpinLock& spinLock = m_vm->apiLock().m_spinLock;
    m_vm->apiLock().grabAllLocks(m_lockCount, spinLock);
}
#endif

} // namespace JSC