+CodeBlock* ProgramCodeBlock::replacement()
+{
+    return &static_cast<ProgramExecutable*>(ownerExecutable())->generatedBytecode();
+}
+
+CodeBlock* EvalCodeBlock::replacement()
+{
+    return &static_cast<EvalExecutable*>(ownerExecutable())->generatedBytecode();
+}
+
+CodeBlock* FunctionCodeBlock::replacement()
+{
+    return &static_cast<FunctionExecutable*>(ownerExecutable())->generatedBytecodeFor(m_isConstructor ? CodeForConstruct : CodeForCall);
+}
+
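+// Request an optimizing (DFG) compile from the owning executable. If the
+// replacement code block is already at the next JIT tier, the compile has
+// already happened, so return 0 to signal that no error occurred.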
+JSObject* ProgramCodeBlock::compileOptimized(ExecState* exec, ScopeChainNode* scopeChainNode)
+{
+    if (replacement()->getJITType() == JITCode::nextTierJIT(getJITType()))
+        return 0;
+    return static_cast<ProgramExecutable*>(ownerExecutable())->compileOptimized(exec, scopeChainNode);
+}
+
+JSObject* EvalCodeBlock::compileOptimized(ExecState* exec, ScopeChainNode* scopeChainNode)
+{
+    if (replacement()->getJITType() == JITCode::nextTierJIT(getJITType()))
+        return 0;
+    return static_cast<EvalExecutable*>(ownerExecutable())->compileOptimized(exec, scopeChainNode);
+}
+
+JSObject* FunctionCodeBlock::compileOptimized(ExecState* exec, ScopeChainNode* scopeChainNode)
+{
+    if (replacement()->getJITType() == JITCode::nextTierJIT(getJITType()))
+        return 0;
+    return static_cast<FunctionExecutable*>(ownerExecutable())->compileOptimizedFor(exec, scopeChainNode, m_isConstructor ? CodeForConstruct : CodeForCall);
+}
+
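+// Capability checks defer to the DFG front end for each code type; function
+// code blocks distinguish the call and construct entry points.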
+bool ProgramCodeBlock::canCompileWithDFGInternal()
+{
+    return DFG::canCompileProgram(this);
+}
+
+bool EvalCodeBlock::canCompileWithDFGInternal()
+{
+    return DFG::canCompileEval(this);
+}
+
+bool FunctionCodeBlock::canCompileWithDFGInternal()
+{
+    if (m_isConstructor)
+        return DFG::canCompileFunctionForConstruct(this);
+    return DFG::canCompileFunctionForCall(this);
+}
+
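+// Jettisoning discards this optimized code block so that execution falls back
+// to the baseline version. It is only valid on the code block that is
+// currently installed as the optimized replacement, as the assertions verify.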
+void ProgramCodeBlock::jettison()
+{
+    ASSERT(JITCode::isOptimizingJIT(getJITType()));
+    ASSERT(this == replacement());
+    static_cast<ProgramExecutable*>(ownerExecutable())->jettisonOptimizedCode(*globalData());
+}
+
+void EvalCodeBlock::jettison()
+{
+    ASSERT(JITCode::isOptimizingJIT(getJITType()));
+    ASSERT(this == replacement());
+    static_cast<EvalExecutable*>(ownerExecutable())->jettisonOptimizedCode(*globalData());
+}
+
+void FunctionCodeBlock::jettison()
+{
+    ASSERT(JITCode::isOptimizingJIT(getJITType()));
+    ASSERT(this == replacement());
+    static_cast<FunctionExecutable*>(ownerExecutable())->jettisonOptimizedCodeFor(*globalData(), m_isConstructor ? CodeForConstruct : CodeForCall);
+}
+
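+// Compile baseline JIT code for a code block that is still executing through
+// the interpreter thunk.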
+bool ProgramCodeBlock::jitCompileImpl(JSGlobalData& globalData)
+{
+    ASSERT(getJITType() == JITCode::InterpreterThunk);
+    ASSERT(this == replacement());
+    return static_cast<ProgramExecutable*>(ownerExecutable())->jitCompile(globalData);
+}
+
+bool EvalCodeBlock::jitCompileImpl(JSGlobalData& globalData)
+{
+    ASSERT(getJITType() == JITCode::InterpreterThunk);
+    ASSERT(this == replacement());
+    return static_cast<EvalExecutable*>(ownerExecutable())->jitCompile(globalData);
+}
+
+bool FunctionCodeBlock::jitCompileImpl(JSGlobalData& globalData)
+{
+    ASSERT(getJITType() == JITCode::InterpreterThunk);
+    ASSERT(this == replacement());
+    return static_cast<FunctionExecutable*>(ownerExecutable())->jitCompileFor(globalData, m_isConstructor ? CodeForConstruct : CodeForCall);
+}
+#endif
+
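+// Decide whether the value profiles have accumulated enough data to make an
+// optimizing compile worthwhile. We optimize once both the liveness rate
+// (profiles that have observed at least one value) and the fullness rate
+// (buckets filled across all profiles) reach their configured thresholds, or
+// once optimization has already been deferred the maximum number of times;
+// otherwise we defer again and re-arm the warm-up counter.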
+#if ENABLE(VALUE_PROFILER)
+bool CodeBlock::shouldOptimizeNow()
+{
+#if ENABLE(JIT_VERBOSE_OSR)
+    dataLog("Considering optimizing %p...\n", this);
+#endif
+
+#if ENABLE(VERBOSE_VALUE_PROFILE)
+    dumpValueProfiles();
+#endif
+
+    if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay)
+        return true;
+
+    unsigned numberOfLiveNonArgumentValueProfiles = 0;
+    unsigned numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals totalNumberOfValueProfiles() then value profiles are full.
+    for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
+        ValueProfile* profile = getFromAllValueProfiles(i);
+        unsigned numSamples = profile->totalNumberOfSamples();
+        if (numSamples > ValueProfile::numberOfBuckets)
+            numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
+        numberOfSamplesInProfiles += numSamples;
+        if (profile->m_bytecodeOffset < 0) {
+            profile->computeUpdatedPrediction();
+            continue;
+        }
+        if (profile->numberOfSamples() || profile->m_prediction != PredictNone)
+            numberOfLiveNonArgumentValueProfiles++;
+        profile->computeUpdatedPrediction();
+    }
+
+#if ENABLE(JIT_VERBOSE_OSR)
+    dataLog("Profile hotness: %lf, %lf\n", (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles(), (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles());
+#endif
+
+    if ((!numberOfValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles() >= Options::desiredProfileLivenessRate)
+        && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate)
+        && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay)
+        return true;
+
+    ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max());
+    m_optimizationDelayCounter++;
+    optimizeAfterWarmUp();
+    return false;
+}
+#endif
+
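+// Record this DFG code block's frequently-taken OSR exits on the baseline
+// (profiled) code block, so that the next optimizing compile can avoid the
+// speculations that keep failing.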
+#if ENABLE(DFG_JIT)
+void CodeBlock::tallyFrequentExitSites()
+{
+    ASSERT(getJITType() == JITCode::DFGJIT);
+    ASSERT(alternative()->getJITType() == JITCode::BaselineJIT);
+    ASSERT(!!m_dfgData);
+
+    CodeBlock* profiledBlock = alternative();
+
+    for (unsigned i = 0; i < m_dfgData->osrExit.size(); ++i) {
+        DFG::OSRExit& exit = m_dfgData->osrExit[i];
+
+        if (!exit.considerAddingAsFrequentExitSite(this, profiledBlock))
+            continue;
+
+#if DFG_ENABLE(DEBUG_VERBOSE)
+        dataLog("OSR exit #%u (bc#%u, @%u, %s) for code block %p occurred frequently; counting as frequent exit site.\n", i, exit.m_codeOrigin.bytecodeIndex, exit.m_nodeIndex, DFG::exitKindToString(exit.m_kind), this);