// dfg/DFGJITCompiler.h
// From: apple/javascriptcore.git (tag JavaScriptCore-7600.1.4.15.12).
1/*
2 * Copyright (C) 2011, 2013, 2014 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#ifndef DFGJITCompiler_h
27#define DFGJITCompiler_h
28
29#if ENABLE(DFG_JIT)
30
31#include "CCallHelpers.h"
32#include "CodeBlock.h"
33#include "DFGDisassembler.h"
34#include "DFGGraph.h"
35#include "DFGInlineCacheWrapper.h"
36#include "DFGJITCode.h"
37#include "DFGOSRExitCompilationInfo.h"
38#include "DFGRegisterBank.h"
39#include "FPRInfo.h"
40#include "GPRInfo.h"
41#include "JITCode.h"
42#include "JITInlineCacheGenerator.h"
43#include "LinkBuffer.h"
44#include "MacroAssembler.h"
45#include "TempRegisterSet.h"
46
47namespace JSC {
48
49class AbstractSamplingCounter;
50class CodeBlock;
51class VM;
52
53namespace DFG {
54
55class JITCodeGenerator;
56class NodeToRegisterMap;
57class OSRExitJumpPlaceholder;
58class SlowPathGenerator;
59class SpeculativeJIT;
60class SpeculationRecovery;
61
62struct EntryLocation;
63struct OSRExit;
64
65// === CallLinkRecord ===
66//
67// A record of a call out from JIT code that needs linking to a helper function.
68// Every CallLinkRecord contains a reference to the call instruction & the function
69// that it needs to be linked to.
70struct CallLinkRecord {
71 CallLinkRecord(MacroAssembler::Call call, FunctionPtr function)
72 : m_call(call)
73 , m_function(function)
74 {
75 }
76
77 MacroAssembler::Call m_call;
78 FunctionPtr m_function;
79};
80
81struct InRecord {
82 InRecord(
83 MacroAssembler::PatchableJump jump, MacroAssembler::Label done,
84 SlowPathGenerator* slowPathGenerator, StructureStubInfo* stubInfo)
85 : m_jump(jump)
86 , m_done(done)
87 , m_slowPathGenerator(slowPathGenerator)
88 , m_stubInfo(stubInfo)
89 {
90 }
91
92 MacroAssembler::PatchableJump m_jump;
93 MacroAssembler::Label m_done;
94 SlowPathGenerator* m_slowPathGenerator;
95 StructureStubInfo* m_stubInfo;
96};
97
98// === JITCompiler ===
99//
100// DFG::JITCompiler is responsible for generating JIT code from the dataflow graph.
101// It does so by delegating to the speculative & non-speculative JITs, which
102// generate to a MacroAssembler (which the JITCompiler owns through an inheritance
103// relationship). The JITCompiler holds references to information required during
104// compilation, and also records information used in linking (e.g. a list of all
105// call to be linked).
class JITCompiler : public CCallHelpers {
public:
    JITCompiler(Graph& dfg);
    ~JITCompiler();

    // Top-level code generation for a program (compile) or a function
    // (compileFunction). Defined out of line.
    void compile();
    void compileFunction();

    // Finalize the generated code (patch calls, stubs, OSR exits).
    void link();
    void linkFunction();

    // Accessors for properties.
    Graph& graph() { return m_graph; }

    // Methods to set labels for the disassembler. Each is a no-op unless a
    // disassembler was created; the null check is marked LIKELY because
    // disassembly is normally disabled.
    void setStartOfCode()
    {
        if (LIKELY(!m_disassembler))
            return;
        m_disassembler->setStartOfCode(labelIgnoringWatchpoints());
    }

    void setForBlockIndex(BlockIndex blockIndex)
    {
        if (LIKELY(!m_disassembler))
            return;
        m_disassembler->setForBlockIndex(blockIndex, labelIgnoringWatchpoints());
    }

    void setForNode(Node* node)
    {
        if (LIKELY(!m_disassembler))
            return;
        m_disassembler->setForNode(node, labelIgnoringWatchpoints());
    }

    void setEndOfMainPath()
    {
        if (LIKELY(!m_disassembler))
            return;
        m_disassembler->setEndOfMainPath(labelIgnoringWatchpoints());
    }

    void setEndOfCode()
    {
        if (LIKELY(!m_disassembler))
            return;
        m_disassembler->setEndOfCode(labelIgnoringWatchpoints());
    }

    // Register codeOrigin with the JITCode's common data and store the encoded
    // index into the tag word of the frame's ArgumentCount slot.
    // NOTE(review): presumably this lets the runtime recover the active code
    // origin from the call frame — confirm against CallFrame::Location users.
    void emitStoreCodeOrigin(CodeOrigin codeOrigin)
    {
        unsigned index = m_jitCode->common.addCodeOrigin(codeOrigin);
        unsigned locationBits = CallFrame::Location::encodeAsCodeOriginIndex(index);
        store32(TrustedImm32(locationBits), tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
    }

    // Add a call out from JIT code, without an exception check.
    Call appendCall(const FunctionPtr& function)
    {
        Call functionCall = call();
        m_calls.append(CallLinkRecord(functionCall, function));
        return functionCall;
    }

    // Record a jump to be taken on a pending exception; accumulated jumps are
    // resolved when the exception handlers are compiled/linked.
    void exceptionCheck(Jump jumpToHandler)
    {
        m_exceptionChecks.append(jumpToHandler);
    }

    // Emit an exception check and record the resulting jump.
    void exceptionCheck()
    {
        m_exceptionChecks.append(emitExceptionCheck());
    }

    // As exceptionCheck(), but the jump is kept in a separate list whose
    // handling rolls back the call frame.
    void exceptionCheckWithCallFrameRollback()
    {
        m_exceptionChecksWithCallFrameRollback.append(emitExceptionCheck());
    }

    // Add a call out from JIT code, with a fast exception check that tests if the return value is zero.
    void fastExceptionCheck()
    {
        callExceptionFuzz();
        m_exceptionChecks.append(branchTestPtr(Zero, GPRInfo::returnValueGPR));
    }

    // Append a fresh OSR exit record seeded with the given failure jumps and
    // return a reference to it. m_exitCompilationInfo is a SegmentedVector,
    // which keeps elements in place, so the reference stays valid across
    // later appends.
    OSRExitCompilationInfo& appendExitInfo(MacroAssembler::JumpList jumpsToFail = MacroAssembler::JumpList())
    {
        OSRExitCompilationInfo info;
        info.m_failureJumps = jumpsToFail;
        m_exitCompilationInfo.append(info);
        return m_exitCompilationInfo.last();
    }

#if USE(JSVALUE32_64)
    void* addressOfDoubleConstant(Node*);
#endif

    // Record inline cache sites (get_by_id / put_by_id / in) so they can be
    // finalized at link time.
    void addGetById(const JITGetByIdGenerator& gen, SlowPathGenerator* slowPath)
    {
        m_getByIds.append(InlineCacheWrapper<JITGetByIdGenerator>(gen, slowPath));
    }

    void addPutById(const JITPutByIdGenerator& gen, SlowPathGenerator* slowPath)
    {
        m_putByIds.append(InlineCacheWrapper<JITPutByIdGenerator>(gen, slowPath));
    }

    void addIn(const InRecord& record)
    {
        m_ins.append(record);
    }

    // Index the next addJSCall() will occupy in m_jsCalls.
    unsigned currentJSCallIndex() const
    {
        return m_jsCalls.size();
    }

    void addJSCall(Call fastCall, Call slowCall, DataLabelPtr targetToCheck, CallLinkInfo* info)
    {
        m_jsCalls.append(JSCallRecord(fastCall, slowCall, targetToCheck, info));
    }

    // Register a cell with the compilation plan's weak-reference set
    // (addLazily) rather than holding a strong reference here.
    void addWeakReference(JSCell* target)
    {
        m_graph.m_plan.weakReferences.addLazily(target);
    }

    void addWeakReferences(const StructureSet& structureSet)
    {
        for (unsigned i = structureSet.size(); i--;)
            addWeakReference(structureSet[i]);
    }

    // Compare against a cell pointer and record that cell as a weak reference.
    template<typename T>
    Jump branchWeakPtr(RelationalCondition cond, T left, JSCell* weakPtr)
    {
        Jump result = branchPtr(cond, left, TrustedImmPtr(weakPtr));
        addWeakReference(weakPtr);
        return result;
    }

    // On JSVALUE64 structures are compared by 32-bit structure id; otherwise
    // by full pointer comparison. Either way the structure is weakly held.
    template<typename T>
    Jump branchWeakStructure(RelationalCondition cond, T left, Structure* weakStructure)
    {
#if USE(JSVALUE64)
        Jump result = branch32(cond, left, TrustedImm32(weakStructure->id()));
        addWeakReference(weakStructure);
        return result;
#else
        return branchWeakPtr(cond, left, weakStructure);
#endif
    }

    // As branchWeakStructure, but without registering a weak reference.
    template<typename T>
    Jump branchStructurePtr(RelationalCondition cond, T left, Structure* structure)
    {
#if USE(JSVALUE64)
        return branch32(cond, left, TrustedImm32(structure->id()));
#else
        return branchPtr(cond, left, TrustedImmPtr(structure));
#endif
    }

    // Record an OSR entry point for basicBlock at blockHead, fixing up the
    // expected-value profile for dead variables and noting which locals need
    // format coercion or reshuffling on entry.
    void noticeOSREntry(BasicBlock& basicBlock, JITCompiler::Label blockHead, LinkBuffer& linkBuffer)
    {
        // OSR entry is not allowed into blocks deemed unreachable by control flow analysis.
        if (!basicBlock.cfaHasVisited)
            return;

        OSREntryData* entry = m_jitCode->appendOSREntryData(basicBlock.bytecodeBegin, linkBuffer.offsetOf(blockHead));

        entry->m_expectedValues = basicBlock.valuesAtHead;

        // Fix the expected values: in our protocol, a dead variable will have an expected
        // value of (None, []). But the old JIT may stash some values there. So we really
        // need (Top, TOP).
        for (size_t argument = 0; argument < basicBlock.variablesAtHead.numberOfArguments(); ++argument) {
            Node* node = basicBlock.variablesAtHead.argument(argument);
            if (!node || !node->shouldGenerate())
                entry->m_expectedValues.argument(argument).makeHeapTop();
        }
        for (size_t local = 0; local < basicBlock.variablesAtHead.numberOfLocals(); ++local) {
            Node* node = basicBlock.variablesAtHead.local(local);
            if (!node || !node->shouldGenerate())
                entry->m_expectedValues.local(local).makeHeapTop();
            else {
                VariableAccessData* variable = node->variableAccessData();
                entry->m_machineStackUsed.set(variable->machineLocal().toLocal());

                switch (variable->flushFormat()) {
                case FlushedDouble:
                    entry->m_localsForcedDouble.set(local);
                    break;
                case FlushedInt52:
                    entry->m_localsForcedMachineInt.set(local);
                    break;
                default:
                    break;
                }

                // When the bytecode location differs from the machine
                // location, entry must shuffle the value into place.
                if (variable->local() != variable->machineLocal()) {
                    entry->m_reshufflings.append(
                        OSREntryReshuffling(
                            variable->local().offset(), variable->machineLocal().offset()));
                }
            }
        }

        entry->m_reshufflings.shrinkToFit();
    }

    PassRefPtr<JITCode> jitCode() { return m_jitCode; }

    Vector<Label>& blockHeads() { return m_blockHeads; }

private:
    friend class OSRExitJumpPlaceholder;

    // Internal implementation to compile.
    void compileEntry();
    void compileBody();
    void link(LinkBuffer&);

    void exitSpeculativeWithOSR(const OSRExit&, SpeculationRecovery*);
    void compileExceptionHandlers();
    void linkOSRExits();
    void disassemble(LinkBuffer&);

    // The dataflow graph currently being generated.
    Graph& m_graph;

    OwnPtr<Disassembler> m_disassembler;

    RefPtr<JITCode> m_jitCode;

    // Vector of calls out from JIT code that need linking to helper functions.
    Vector<CallLinkRecord> m_calls;
    // Jumps taken when an exception is pending; the rollback variant is
    // handled separately (see exceptionCheckWithCallFrameRollback()).
    JumpList m_exceptionChecks;
    JumpList m_exceptionChecksWithCallFrameRollback;

    Vector<Label> m_blockHeads;

    // Record of a JS call site: the fast and slow calls, the patchable callee
    // check, and the associated CallLinkInfo.
    struct JSCallRecord {
        JSCallRecord(Call fastCall, Call slowCall, DataLabelPtr targetToCheck, CallLinkInfo* info)
            : m_fastCall(fastCall)
            , m_slowCall(slowCall)
            , m_targetToCheck(targetToCheck)
            , m_info(info)
        {
        }

        Call m_fastCall;
        Call m_slowCall;
        DataLabelPtr m_targetToCheck;
        CallLinkInfo* m_info;
    };

    Vector<InlineCacheWrapper<JITGetByIdGenerator>, 4> m_getByIds;
    Vector<InlineCacheWrapper<JITPutByIdGenerator>, 4> m_putByIds;
    Vector<InRecord, 4> m_ins;
    Vector<JSCallRecord, 4> m_jsCalls;
    // SegmentedVector because appendExitInfo() hands out references into this
    // container; element addresses must remain stable as it grows.
    SegmentedVector<OSRExitCompilationInfo, 4> m_exitCompilationInfo;
    Vector<Vector<Label>> m_exitSiteLabels;

    Call m_callArityFixup;
    Label m_arityCheck;
    OwnPtr<SpeculativeJIT> m_speculative;
};
377
378} } // namespace JSC::DFG
379
380#endif
381#endif
382