/*
 * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "DFGJITCode.h"

#if ENABLE(DFG_JIT)

#include "CodeBlock.h"
#include "JSCInlines.h"
#include "TrackedReferences.h"
35 namespace JSC
{ namespace DFG
{
38 : DirectJITCode(DFGJIT
)
41 , abandonOSREntry(false)
42 #endif // ENABLE(FTL_JIT)
50 CommonData
* JITCode::dfgCommon()
55 JITCode
* JITCode::dfg()
60 void JITCode::shrinkToFit()
63 osrEntry
.shrinkToFit();
64 osrExit
.shrinkToFit();
65 speculationRecovery
.shrinkToFit();
66 minifiedDFG
.prepareAndShrink();
67 variableEventStream
.shrinkToFit();
70 void JITCode::reconstruct(
71 CodeBlock
* codeBlock
, CodeOrigin codeOrigin
, unsigned streamIndex
,
72 Operands
<ValueRecovery
>& result
)
74 variableEventStream
.reconstruct(
75 codeBlock
, codeOrigin
, minifiedDFG
, streamIndex
, result
);
78 void JITCode::reconstruct(
79 ExecState
* exec
, CodeBlock
* codeBlock
, CodeOrigin codeOrigin
, unsigned streamIndex
,
80 Operands
<JSValue
>& result
)
82 Operands
<ValueRecovery
> recoveries
;
83 reconstruct(codeBlock
, codeOrigin
, streamIndex
, recoveries
);
85 result
= Operands
<JSValue
>(OperandsLike
, recoveries
);
86 for (size_t i
= result
.size(); i
--;)
87 result
[i
] = recoveries
[i
].recover(exec
);
91 bool JITCode::checkIfOptimizationThresholdReached(CodeBlock
* codeBlock
)
93 ASSERT(codeBlock
->jitType() == JITCode::DFGJIT
);
94 return tierUpCounter
.checkIfThresholdCrossedAndSet(codeBlock
->baselineVersion());
97 void JITCode::optimizeNextInvocation(CodeBlock
* codeBlock
)
99 ASSERT(codeBlock
->jitType() == JITCode::DFGJIT
);
100 if (Options::verboseOSR())
101 dataLog(*codeBlock
, ": FTL-optimizing next invocation.\n");
102 tierUpCounter
.setNewThreshold(0, codeBlock
->baselineVersion());
105 void JITCode::dontOptimizeAnytimeSoon(CodeBlock
* codeBlock
)
107 ASSERT(codeBlock
->jitType() == JITCode::DFGJIT
);
108 if (Options::verboseOSR())
109 dataLog(*codeBlock
, ": Not FTL-optimizing anytime soon.\n");
110 tierUpCounter
.deferIndefinitely();
113 void JITCode::optimizeAfterWarmUp(CodeBlock
* codeBlock
)
115 ASSERT(codeBlock
->jitType() == JITCode::DFGJIT
);
116 if (Options::verboseOSR())
117 dataLog(*codeBlock
, ": FTL-optimizing after warm-up.\n");
118 CodeBlock
* baseline
= codeBlock
->baselineVersion();
119 tierUpCounter
.setNewThreshold(
120 baseline
->adjustedCounterValue(Options::thresholdForFTLOptimizeAfterWarmUp()),
124 void JITCode::optimizeSoon(CodeBlock
* codeBlock
)
126 ASSERT(codeBlock
->jitType() == JITCode::DFGJIT
);
127 if (Options::verboseOSR())
128 dataLog(*codeBlock
, ": FTL-optimizing soon.\n");
129 CodeBlock
* baseline
= codeBlock
->baselineVersion();
130 tierUpCounter
.setNewThreshold(
131 baseline
->adjustedCounterValue(Options::thresholdForFTLOptimizeSoon()),
135 void JITCode::forceOptimizationSlowPathConcurrently(CodeBlock
* codeBlock
)
137 ASSERT(codeBlock
->jitType() == JITCode::DFGJIT
);
138 if (Options::verboseOSR())
139 dataLog(*codeBlock
, ": Forcing slow path concurrently for FTL entry.\n");
140 tierUpCounter
.forceSlowPathConcurrently();
143 void JITCode::setOptimizationThresholdBasedOnCompilationResult(
144 CodeBlock
* codeBlock
, CompilationResult result
)
146 ASSERT(codeBlock
->jitType() == JITCode::DFGJIT
);
148 case CompilationSuccessful
:
149 optimizeNextInvocation(codeBlock
);
150 codeBlock
->baselineVersion()->m_hasBeenCompiledWithFTL
= true;
152 case CompilationFailed
:
153 dontOptimizeAnytimeSoon(codeBlock
);
154 codeBlock
->baselineVersion()->m_didFailFTLCompilation
= true;
156 case CompilationDeferred
:
157 optimizeAfterWarmUp(codeBlock
);
159 case CompilationInvalidated
:
160 // This is weird - it will only happen in cases when the DFG code block (i.e.
161 // the code block that this JITCode belongs to) is also invalidated. So it
162 // doesn't really matter what we do. But, we do the right thing anyway. Note
163 // that us counting the reoptimization actually means that we might count it
164 // twice. But that's generally OK. It's better to overcount reoptimizations
165 // than it is to undercount them.
166 codeBlock
->baselineVersion()->countReoptimization();
167 optimizeAfterWarmUp(codeBlock
);
170 RELEASE_ASSERT_NOT_REACHED();
172 #endif // ENABLE(FTL_JIT)
174 void JITCode::validateReferences(const TrackedReferences
& trackedReferences
)
176 common
.validateReferences(trackedReferences
);
178 for (OSREntryData
& entry
: osrEntry
) {
179 for (unsigned i
= entry
.m_expectedValues
.size(); i
--;)
180 entry
.m_expectedValues
[i
].validateReferences(trackedReferences
);
183 minifiedDFG
.validateReferences(trackedReferences
);
} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)