/*
 * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "CallLinkStatus.h"

#include "CallLinkInfo.h"
#include "CodeBlock.h"
#include "DFGJITCode.h"
#include "JSCInlines.h"
#include "LLIntCallLinkInfo.h"
#include <wtf/CommaPrinter.h>

namespace JSC {

static const bool verbose = false;

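// A CallLinkStatus built directly from a JSValue records everything the
// optimizing compilers care to know about a concrete call target: the value
// itself, its Structure, and, when the target is a JSFunction, its executable.
// A minimal usage sketch (the emitDirectCall() helper is hypothetical, not
// part of JavaScriptCore):
//
//     CallLinkStatus status(calleeValue);
//     if (JSFunction* function = status.function())
//         emitDirectCall(function); // callee is a known JSFunction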
CallLinkStatus::CallLinkStatus(JSValue value)
    : m_callTarget(value)
    , m_executable(0)
    , m_structure(0)
    , m_couldTakeSlowPath(false)
    , m_isProved(false)
{
    if (!value || !value.isCell())
        return;

    m_structure = value.asCell()->structure();

    if (!value.asCell()->inherits(JSFunction::info()))
        return;

    m_executable = jsCast<JSFunction*>(value.asCell())->executable();
}

JSFunction* CallLinkStatus::function() const
{
    if (!m_callTarget || !m_callTarget.isCell())
        return 0;

    if (!m_callTarget.asCell()->inherits(JSFunction::info()))
        return 0;

    return jsCast<JSFunction*>(m_callTarget.asCell());
}

InternalFunction* CallLinkStatus::internalFunction() const
{
    if (!m_callTarget || !m_callTarget.isCell())
        return 0;

    if (!m_callTarget.asCell()->inherits(InternalFunction::info()))
        return 0;

    return jsCast<InternalFunction*>(m_callTarget.asCell());
}

Intrinsic CallLinkStatus::intrinsicFor(CodeSpecializationKind kind) const
{
    if (!m_executable)
        return NoIntrinsic;

    return m_executable->intrinsicFor(kind);
}

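// Build a status from the LLInt's profiling alone. This is the fallback when
// no JIT call link information exists for the call site: the best the LLInt
// can tell us is the last callee it saw, so the result is at most a
// monomorphic guess, and an empty status if the site never executed.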
CallLinkStatus CallLinkStatus::computeFromLLInt(const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
{
    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
#if ENABLE(DFG_JIT)
    if (profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadFunction))) {
        // We could force this to be a closure call, but instead we'll just assume that it
        // takes slow path.
        return takesSlowPath();
    }
#else
    UNUSED_PARAM(locker);
#endif

    VM& vm = *profiledBlock->vm();

    Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
    OpcodeID op = vm.interpreter->getOpcodeID(instruction[0].u.opcode);
    if (op != op_call && op != op_construct)
        return CallLinkStatus();

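    // Operand 5 of op_call / op_construct holds the LLIntCallLinkInfo for this
    // call site; its lastSeenCallee is the most recent callee the LLInt observed.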
    LLIntCallLinkInfo* callLinkInfo = instruction[5].u.callLinkInfo;

    return CallLinkStatus(callLinkInfo->lastSeenCallee.get());
}

CallLinkStatus CallLinkStatus::computeFor(
    CodeBlock* profiledBlock, unsigned bytecodeIndex, const CallLinkInfoMap& map)
{
    ConcurrentJITLocker locker(profiledBlock->m_lock);

    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
    UNUSED_PARAM(map);
#if ENABLE(DFG_JIT)
    if (profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCache))
        || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCacheWatchpoint))
        || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadExecutable)))
        return takesSlowPath();

    CallLinkInfo* callLinkInfo = map.get(CodeOrigin(bytecodeIndex));
    if (!callLinkInfo)
        return computeFromLLInt(locker, profiledBlock, bytecodeIndex);

    CallLinkStatus result = computeFor(locker, *callLinkInfo);
    if (!result)
        return computeFromLLInt(locker, profiledBlock, bytecodeIndex);

    if (profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadFunction)))
        result.makeClosureCall();

    return result;
#else
    return CallLinkStatus();
#endif
}
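
// A minimal sketch of the intended call pattern from an optimizing compiler's
// bytecode parser (the locals here are hypothetical, not part of this file):
//
//     CallLinkStatus status = CallLinkStatus::computeFor(
//         profiledBlock, bytecodeIndex, baselineMap);
//     if (status.canOptimize()) {
//         // Inline or emit a direct/closure call based on status.function(),
//         // status.executable(), and status.structure().
//     }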

#if ENABLE(JIT)
CallLinkStatus CallLinkStatus::computeFor(const ConcurrentJITLocker&, CallLinkInfo& callLinkInfo)
{
    // Note that despite requiring that the locker is held, this code is racy with respect
    // to the CallLinkInfo: it may get cleared while this code runs! This is because
    // CallLinkInfo::unlink() may be called from a different CodeBlock than the one that owns
    // the CallLinkInfo, and currently we save space by not having CallLinkInfos know who owns
    // them. So, there is no way for either the caller of CallLinkInfo::unlink(), or unlink()
    // itself, to figure out which lock to lock.
    //
    // Fortunately, that doesn't matter. The only things we ask of the CallLinkInfo - the slow
    // path count, the stub, and the target - can all be asked racily. Stubs and targets can
    // only be deleted at the next GC, so if we load a non-null one, then it must contain data
    // that is still marginally valid (i.e. the pointers ain't stale). This kind of raciness
    // is probably OK for now.

    if (callLinkInfo.slowPathCount >= Options::couldTakeSlowCaseMinimumCount())
        return takesSlowPath();

    if (ClosureCallStubRoutine* stub = callLinkInfo.stub.get())
        return CallLinkStatus(stub->executable(), stub->structure());

    JSFunction* target = callLinkInfo.lastSeenCallee.get();
    if (!target)
        return CallLinkStatus();

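    // The site has seen more than one callee. If those callees at least shared
    // an executable, a closure-call status on (executable, structure) still lets
    // the compiler optimize, even though the exact JSFunction varies.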
    if (callLinkInfo.hasSeenClosure)
        return CallLinkStatus(target->executable(), target->structure());

    return CallLinkStatus(target);
}
#endif

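// Harvest a CallLinkStatus for every call site of a DFG code block, keyed by
// CodeOrigin, for consumption by the FTL. Exit-site data from the corresponding
// baseline CodeBlock is folded in, so call sites that already misbehaved in the
// FTL get pessimized to slow-path or closure-call statuses up front.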
void CallLinkStatus::computeDFGStatuses(
    CodeBlock* dfgCodeBlock, CallLinkStatus::ContextMap& map)
{
#if ENABLE(DFG_JIT)
    RELEASE_ASSERT(dfgCodeBlock->jitType() == JITCode::DFGJIT);
    CodeBlock* baselineCodeBlock = dfgCodeBlock->alternative();
    for (auto iter = dfgCodeBlock->callLinkInfosBegin(); !!iter; ++iter) {
        CallLinkInfo& info = **iter;
        CodeOrigin codeOrigin = info.codeOrigin;

        bool takeSlowPath;
        bool badFunction;

        // Check if we have already made a terrible mistake in the FTL for this
        // code origin. Note that this is approximate because we could have a monovariant
        // inline in the FTL that ended up failing. We should fix that at some point by
        // having data structures to track the context of frequent exits. This is currently
        // challenging because it would require creating a CodeOrigin-based database in
        // baseline CodeBlocks, but those CodeBlocks don't really have a place to put the
        // InlineCallFrames.
        CodeBlock* currentBaseline =
            baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, baselineCodeBlock);
        {
            ConcurrentJITLocker locker(currentBaseline->m_lock);
            takeSlowPath =
                currentBaseline->hasExitSite(locker, DFG::FrequentExitSite(codeOrigin.bytecodeIndex, BadCache, ExitFromFTL))
                || currentBaseline->hasExitSite(locker, DFG::FrequentExitSite(codeOrigin.bytecodeIndex, BadCacheWatchpoint, ExitFromFTL))
                || currentBaseline->hasExitSite(locker, DFG::FrequentExitSite(codeOrigin.bytecodeIndex, BadExecutable, ExitFromFTL));
            badFunction =
                currentBaseline->hasExitSite(locker, DFG::FrequentExitSite(codeOrigin.bytecodeIndex, BadFunction, ExitFromFTL));
        }

        {
            ConcurrentJITLocker locker(dfgCodeBlock->m_lock);
            if (takeSlowPath)
                map.add(info.codeOrigin, takesSlowPath());
            else {
                CallLinkStatus status = computeFor(locker, info);
                if (status.isSet()) {
                    if (badFunction)
                        status.makeClosureCall();
                    map.add(info.codeOrigin, status);
                }
            }
        }
    }
#else
    UNUSED_PARAM(dfgCodeBlock);
#endif // ENABLE(DFG_JIT)

    if (verbose) {
        dataLog("Context map:\n");
        ContextMap::iterator iter = map.begin();
        ContextMap::iterator end = map.end();
        for (; iter != end; ++iter) {
            dataLog("    ", iter->key, ":\n");
            dataLog("        ", iter->value, "\n");
        }
    }
}

CallLinkStatus CallLinkStatus::computeFor(
    CodeBlock* profiledBlock, CodeOrigin codeOrigin,
    const CallLinkInfoMap& baselineMap, const CallLinkStatus::ContextMap& dfgMap)
{
    auto iter = dfgMap.find(codeOrigin);
    if (iter != dfgMap.end())
        return iter->value;

    return computeFor(profiledBlock, codeOrigin.bytecodeIndex, baselineMap);
}
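
// A minimal sketch of how the two maps are expected to fit together when
// compiling with the FTL (the locals are hypothetical, not part of this file):
//
//     CallLinkStatus::ContextMap contextMap;
//     CallLinkStatus::computeDFGStatuses(dfgCodeBlock, contextMap);
//     // ... later, for each call site being compiled:
//     CallLinkStatus status = CallLinkStatus::computeFor(
//         profiledBlock, codeOrigin, baselineMap, contextMap);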

void CallLinkStatus::dump(PrintStream& out) const
{
    if (!isSet()) {
        out.print("Not Set");
        return;
    }

    CommaPrinter comma;

    if (m_isProved)
        out.print(comma, "Statically Proved");

    if (m_couldTakeSlowPath)
        out.print(comma, "Could Take Slow Path");

    if (m_callTarget)
        out.print(comma, "Known target: ", m_callTarget);

    if (m_executable) {
        out.print(comma, "Executable/CallHash: ", RawPointer(m_executable));
        if (!isCompilationThread())
            out.print("/", m_executable->hashFor(CodeForCall));
    }

    if (m_structure)
        out.print(comma, "Structure: ", RawPointer(m_structure));
}

} // namespace JSC