/*
 * Copyright (C) 2011, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef DFGJITCompiler_h
#define DFGJITCompiler_h

#include "CCallHelpers.h"
#include "CodeBlock.h"
#include "DFGDisassembler.h"
#include "DFGInlineCacheWrapper.h"
#include "DFGJITCode.h"
#include "DFGOSRExitCompilationInfo.h"
#include "DFGRegisterBank.h"
#include "JITInlineCacheGenerator.h"
#include "LinkBuffer.h"
#include "MacroAssembler.h"
#include "TempRegisterSet.h"
49 class AbstractSamplingCounter
;
55 class JITCodeGenerator
;
56 class NodeToRegisterMap
;
57 class OSRExitJumpPlaceholder
;
58 class SlowPathGenerator
;
60 class SpeculationRecovery
;
65 // === CallLinkRecord ===
67 // A record of a call out from JIT code that needs linking to a helper function.
68 // Every CallLinkRecord contains a reference to the call instruction & the function
69 // that it needs to be linked to.
70 struct CallLinkRecord
{
71 CallLinkRecord(MacroAssembler::Call call
, FunctionPtr function
)
73 , m_function(function
)
77 MacroAssembler::Call m_call
;
78 FunctionPtr m_function
;
83 MacroAssembler::PatchableJump jump
, MacroAssembler::Label done
,
84 SlowPathGenerator
* slowPathGenerator
, StructureStubInfo
* stubInfo
)
87 , m_slowPathGenerator(slowPathGenerator
)
88 , m_stubInfo(stubInfo
)
92 MacroAssembler::PatchableJump m_jump
;
93 MacroAssembler::Label m_done
;
94 SlowPathGenerator
* m_slowPathGenerator
;
95 StructureStubInfo
* m_stubInfo
;
98 // === JITCompiler ===
100 // DFG::JITCompiler is responsible for generating JIT code from the dataflow graph.
101 // It does so by delegating to the speculative & non-speculative JITs, which
102 // generate to a MacroAssembler (which the JITCompiler owns through an inheritance
103 // relationship). The JITCompiler holds references to information required during
104 // compilation, and also records information used in linking (e.g. a list of all
105 // call to be linked).
106 class JITCompiler
: public CCallHelpers
{
108 JITCompiler(Graph
& dfg
);
112 void compileFunction();
117 // Accessors for properties.
118 Graph
& graph() { return m_graph
; }
120 // Methods to set labels for the disassembler.
121 void setStartOfCode()
123 if (LIKELY(!m_disassembler
))
125 m_disassembler
->setStartOfCode(labelIgnoringWatchpoints());
128 void setForBlockIndex(BlockIndex blockIndex
)
130 if (LIKELY(!m_disassembler
))
132 m_disassembler
->setForBlockIndex(blockIndex
, labelIgnoringWatchpoints());
135 void setForNode(Node
* node
)
137 if (LIKELY(!m_disassembler
))
139 m_disassembler
->setForNode(node
, labelIgnoringWatchpoints());
142 void setEndOfMainPath()
144 if (LIKELY(!m_disassembler
))
146 m_disassembler
->setEndOfMainPath(labelIgnoringWatchpoints());
151 if (LIKELY(!m_disassembler
))
153 m_disassembler
->setEndOfCode(labelIgnoringWatchpoints());
156 void emitStoreCodeOrigin(CodeOrigin codeOrigin
)
158 unsigned index
= m_jitCode
->common
.addCodeOrigin(codeOrigin
);
159 unsigned locationBits
= CallFrame::Location::encodeAsCodeOriginIndex(index
);
160 store32(TrustedImm32(locationBits
), tagFor(static_cast<VirtualRegister
>(JSStack::ArgumentCount
)));
163 // Add a call out from JIT code, without an exception check.
164 Call
appendCall(const FunctionPtr
& function
)
166 Call functionCall
= call();
167 m_calls
.append(CallLinkRecord(functionCall
, function
));
171 void exceptionCheck(Jump jumpToHandler
)
173 m_exceptionChecks
.append(jumpToHandler
);
176 void exceptionCheck()
178 m_exceptionChecks
.append(emitExceptionCheck());
181 void exceptionCheckWithCallFrameRollback()
183 m_exceptionChecksWithCallFrameRollback
.append(emitExceptionCheck());
186 // Add a call out from JIT code, with a fast exception check that tests if the return value is zero.
187 void fastExceptionCheck()
190 m_exceptionChecks
.append(branchTestPtr(Zero
, GPRInfo::returnValueGPR
));
193 OSRExitCompilationInfo
& appendExitInfo(MacroAssembler::JumpList jumpsToFail
= MacroAssembler::JumpList())
195 OSRExitCompilationInfo info
;
196 info
.m_failureJumps
= jumpsToFail
;
197 m_exitCompilationInfo
.append(info
);
198 return m_exitCompilationInfo
.last();
201 #if USE(JSVALUE32_64)
202 void* addressOfDoubleConstant(Node
*);
205 void addGetById(const JITGetByIdGenerator
& gen
, SlowPathGenerator
* slowPath
)
207 m_getByIds
.append(InlineCacheWrapper
<JITGetByIdGenerator
>(gen
, slowPath
));
210 void addPutById(const JITPutByIdGenerator
& gen
, SlowPathGenerator
* slowPath
)
212 m_putByIds
.append(InlineCacheWrapper
<JITPutByIdGenerator
>(gen
, slowPath
));
215 void addIn(const InRecord
& record
)
217 m_ins
.append(record
);
220 unsigned currentJSCallIndex() const
222 return m_jsCalls
.size();
225 void addJSCall(Call fastCall
, Call slowCall
, DataLabelPtr targetToCheck
, CallLinkInfo
* info
)
227 m_jsCalls
.append(JSCallRecord(fastCall
, slowCall
, targetToCheck
, info
));
230 void addWeakReference(JSCell
* target
)
232 m_graph
.m_plan
.weakReferences
.addLazily(target
);
235 void addWeakReferences(const StructureSet
& structureSet
)
237 for (unsigned i
= structureSet
.size(); i
--;)
238 addWeakReference(structureSet
[i
]);
242 Jump
branchWeakPtr(RelationalCondition cond
, T left
, JSCell
* weakPtr
)
244 Jump result
= branchPtr(cond
, left
, TrustedImmPtr(weakPtr
));
245 addWeakReference(weakPtr
);
250 Jump
branchWeakStructure(RelationalCondition cond
, T left
, Structure
* weakStructure
)
253 Jump result
= branch32(cond
, left
, TrustedImm32(weakStructure
->id()));
254 addWeakReference(weakStructure
);
257 return branchWeakPtr(cond
, left
, weakStructure
);
262 Jump
branchStructurePtr(RelationalCondition cond
, T left
, Structure
* structure
)
265 return branch32(cond
, left
, TrustedImm32(structure
->id()));
267 return branchPtr(cond
, left
, TrustedImmPtr(structure
));
271 void noticeOSREntry(BasicBlock
& basicBlock
, JITCompiler::Label blockHead
, LinkBuffer
& linkBuffer
)
273 // OSR entry is not allowed into blocks deemed unreachable by control flow analysis.
274 if (!basicBlock
.cfaHasVisited
)
277 OSREntryData
* entry
= m_jitCode
->appendOSREntryData(basicBlock
.bytecodeBegin
, linkBuffer
.offsetOf(blockHead
));
279 entry
->m_expectedValues
= basicBlock
.valuesAtHead
;
281 // Fix the expected values: in our protocol, a dead variable will have an expected
282 // value of (None, []). But the old JIT may stash some values there. So we really
284 for (size_t argument
= 0; argument
< basicBlock
.variablesAtHead
.numberOfArguments(); ++argument
) {
285 Node
* node
= basicBlock
.variablesAtHead
.argument(argument
);
286 if (!node
|| !node
->shouldGenerate())
287 entry
->m_expectedValues
.argument(argument
).makeHeapTop();
289 for (size_t local
= 0; local
< basicBlock
.variablesAtHead
.numberOfLocals(); ++local
) {
290 Node
* node
= basicBlock
.variablesAtHead
.local(local
);
291 if (!node
|| !node
->shouldGenerate())
292 entry
->m_expectedValues
.local(local
).makeHeapTop();
294 VariableAccessData
* variable
= node
->variableAccessData();
295 entry
->m_machineStackUsed
.set(variable
->machineLocal().toLocal());
297 switch (variable
->flushFormat()) {
299 entry
->m_localsForcedDouble
.set(local
);
302 entry
->m_localsForcedMachineInt
.set(local
);
308 if (variable
->local() != variable
->machineLocal()) {
309 entry
->m_reshufflings
.append(
311 variable
->local().offset(), variable
->machineLocal().offset()));
316 entry
->m_reshufflings
.shrinkToFit();
319 PassRefPtr
<JITCode
> jitCode() { return m_jitCode
; }
321 Vector
<Label
>& blockHeads() { return m_blockHeads
; }
324 friend class OSRExitJumpPlaceholder
;
326 // Internal implementation to compile.
329 void link(LinkBuffer
&);
331 void exitSpeculativeWithOSR(const OSRExit
&, SpeculationRecovery
*);
332 void compileExceptionHandlers();
334 void disassemble(LinkBuffer
&);
336 // The dataflow graph currently being generated.
339 OwnPtr
<Disassembler
> m_disassembler
;
341 RefPtr
<JITCode
> m_jitCode
;
343 // Vector of calls out from JIT code, including exception handler information.
344 // Count of the number of CallRecords with exception handlers.
345 Vector
<CallLinkRecord
> m_calls
;
346 JumpList m_exceptionChecks
;
347 JumpList m_exceptionChecksWithCallFrameRollback
;
349 Vector
<Label
> m_blockHeads
;
351 struct JSCallRecord
{
352 JSCallRecord(Call fastCall
, Call slowCall
, DataLabelPtr targetToCheck
, CallLinkInfo
* info
)
353 : m_fastCall(fastCall
)
354 , m_slowCall(slowCall
)
355 , m_targetToCheck(targetToCheck
)
362 DataLabelPtr m_targetToCheck
;
363 CallLinkInfo
* m_info
;
366 Vector
<InlineCacheWrapper
<JITGetByIdGenerator
>, 4> m_getByIds
;
367 Vector
<InlineCacheWrapper
<JITPutByIdGenerator
>, 4> m_putByIds
;
368 Vector
<InRecord
, 4> m_ins
;
369 Vector
<JSCallRecord
, 4> m_jsCalls
;
370 SegmentedVector
<OSRExitCompilationInfo
, 4> m_exitCompilationInfo
;
371 Vector
<Vector
<Label
>> m_exitSiteLabels
;
373 Call m_callArityFixup
;
375 OwnPtr
<SpeculativeJIT
> m_speculative
;
378 } } // namespace JSC::DFG