+void CodeBlock::install()
+{
+ ownerExecutable()->installCode(this);
+}
+
+PassRefPtr<CodeBlock> CodeBlock::newReplacement()
+{
+ return ownerExecutable()->newReplacementCodeBlockFor(specializationKind());
+}
+
+const SlowArgument* CodeBlock::machineSlowArguments()
+{
+ if (!JITCode::isOptimizingJIT(jitType()))
+ return symbolTable()->slowArguments();
+
+#if ENABLE(DFG_JIT)
+ return jitCode()->dfgCommon()->slowArguments.get();
+#else // ENABLE(DFG_JIT)
+ return 0;
+#endif // ENABLE(DFG_JIT)
+}
+
+#if ENABLE(JIT)
+CodeBlock* ProgramCodeBlock::replacement()
+{
+ return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
+}
+
+CodeBlock* EvalCodeBlock::replacement()
+{
+ return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
+}
+
+CodeBlock* FunctionCodeBlock::replacement()
+{
+ return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
+}
+
+DFG::CapabilityLevel ProgramCodeBlock::capabilityLevelInternal()
+{
+ return DFG::programCapabilityLevel(this);
+}
+
+DFG::CapabilityLevel EvalCodeBlock::capabilityLevelInternal()
+{
+ return DFG::evalCapabilityLevel(this);
+}
+
+DFG::CapabilityLevel FunctionCodeBlock::capabilityLevelInternal()
+{
+ if (m_isConstructor)
+ return DFG::functionForConstructCapabilityLevel(this);
+ return DFG::functionForCallCapabilityLevel(this);
+}
+#endif
+
+void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode)
+{
+ RELEASE_ASSERT(reason != Profiler::NotJettisoned);
+
+#if ENABLE(DFG_JIT)
+ if (DFG::shouldShowDisassembly()) {
+ dataLog("Jettisoning ", *this);
+ if (mode == CountReoptimization)
+ dataLog(" and counting reoptimization");
+ dataLog(" due to ", reason, ".\n");
+ }
+
+ DeferGCForAWhile deferGC(*m_heap);
+ RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
+
+ if (Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get())
+ compilation->setJettisonReason(reason);
+
+ // We want to accomplish two things here:
+ // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
+ // we should OSR exit at the top of the next bytecode instruction after the return.
+ // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
+
+ // This accomplishes the OSR-exit-on-return part, and does its own book-keeping about
+ // whether the invalidation has already happened.
+ if (!jitCode()->dfgCommon()->invalidate()) {
+ // Nothing to do since we've already been invalidated. That means that we cannot be
+ // the optimized replacement.
+ RELEASE_ASSERT(this != replacement());
+ return;
+ }
+
+ if (DFG::shouldShowDisassembly())
+ dataLog(" Did invalidate ", *this, "\n");
+
+ // Count the reoptimization if that's what the user wanted.
+ if (mode == CountReoptimization) {
+ // FIXME: Maybe this should call alternative().
+ // https://bugs.webkit.org/show_bug.cgi?id=123677
+ baselineAlternative()->countReoptimization();
+ if (DFG::shouldShowDisassembly())
+ dataLog(" Did count reoptimization for ", *this, "\n");
+ }
+
+ // Now take care of the entrypoint.
+ if (this != replacement()) {
+ // This means that we were never the entrypoint. This can happen for OSR entry code
+ // blocks.
+ return;
+ }
+ alternative()->optimizeAfterWarmUp();
+ tallyFrequentExitSites();
+ alternative()->install();
+ if (DFG::shouldShowDisassembly())
+ dataLog(" Did install baseline version of ", *this, "\n");
+#else // ENABLE(DFG_JIT)
+ UNUSED_PARAM(mode);
+ UNREACHABLE_FOR_PLATFORM();
+#endif // ENABLE(DFG_JIT)
+}
+
+JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
+{
+ if (!codeOrigin.inlineCallFrame)
+ return globalObject();
+ return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->eitherCodeBlock()->globalObject();
+}
+
+void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
+{
+ CodeBlock* callerCodeBlock = callerFrame->codeBlock();
+
+ if (Options::verboseCallLink())
+ dataLog("Noticing call link from ", *callerCodeBlock, " to ", *this, "\n");
+
+ if (!m_shouldAlwaysBeInlined)
+ return;
+
+#if ENABLE(DFG_JIT)
+ if (!hasBaselineJITProfiling())
+ return;
+
+ if (!DFG::mightInlineFunction(this))
+ return;
+
+ if (!canInline(m_capabilityLevelState))
+ return;
+
+ if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
+ m_shouldAlwaysBeInlined = false;
+ if (Options::verboseCallLink())
+ dataLog(" Clearing SABI because caller is too large.\n");
+ return;
+ }
+
+ if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) {
+ // If the caller is still in the interpreter, then we can't expect inlining to
+ // happen anytime soon. Assume it's profitable to optimize it separately. This
+ // ensures that a function is SABI only if it is called no more frequently than
+ // any of its callers.
+ m_shouldAlwaysBeInlined = false;
+ if (Options::verboseCallLink())
+ dataLog(" Clearing SABI because caller is in LLInt.\n");
+ return;
+ }
+
+ if (callerCodeBlock->codeType() != FunctionCode) {
+ // If the caller is either eval or global code, assume that it won't be
+ // optimized anytime soon. For eval code this is particularly true since we
+ // delay eval optimization by a *lot*.
+ m_shouldAlwaysBeInlined = false;
+ if (Options::verboseCallLink())
+ dataLog(" Clearing SABI because caller is not a function.\n");
+ return;
+ }
+
+ ExecState* frame = callerFrame;
+ for (unsigned i = Options::maximumInliningDepth(); i--; frame = frame->callerFrame()) {
+ if (frame->isVMEntrySentinel())
+ break;
+ if (frame->codeBlock() == this) {
+ // Recursive calls won't be inlined.
+ if (Options::verboseCallLink())
+ dataLog(" Clearing SABI because recursion was detected.\n");
+ m_shouldAlwaysBeInlined = false;
+ return;
+ }
+ }
+
+ RELEASE_ASSERT(callerCodeBlock->m_capabilityLevelState != DFG::CapabilityLevelNotSet);
+
+ if (canCompile(callerCodeBlock->m_capabilityLevelState))
+ return;
+
+ if (Options::verboseCallLink())
+ dataLog(" Clearing SABI because the caller is not a DFG candidate.\n");
+
+ m_shouldAlwaysBeInlined = false;
+#endif
+}
+
+unsigned CodeBlock::reoptimizationRetryCounter() const
+{
+#if ENABLE(JIT)
+ ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
+ return m_reoptimizationRetryCounter;
+#else
+ return 0;
+#endif // ENABLE(JIT)
+}
+
+#if ENABLE(JIT)
+void CodeBlock::countReoptimization()
+{
+ m_reoptimizationRetryCounter++;
+ if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
+ m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
+}
+
+unsigned CodeBlock::numberOfDFGCompiles()
+{
+ ASSERT(JITCode::isBaselineCode(jitType()));
+ if (Options::testTheFTL()) {
+ if (m_didFailFTLCompilation)
+ return 1000000;
+ return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
+ }
+ return (JITCode::isOptimizingJIT(replacement()->jitType()) ? 1 : 0) + m_reoptimizationRetryCounter;
+}
+
+int32_t CodeBlock::codeTypeThresholdMultiplier() const
+{
+ if (codeType() == EvalCode)
+ return Options::evalThresholdMultiplier();
+
+ return 1;
+}
+
+double CodeBlock::optimizationThresholdScalingFactor()
+{
+ // This expression arises from doing a least-squares fit of
+ //
+ // F[x_] := a * Sqrt[x + b] + Abs[c * x] + d
+ //
+ // against the data points:
+ //
+ // x F[x_]
+ // 10 0.9 (smallest reasonable code block)
+ // 200 1.0 (typical small-ish code block)
+ // 320 1.2 (something I saw in 3d-cube that I wanted to optimize)
+ // 1268 5.0 (something I saw in 3d-cube that I didn't want to optimize)
+ // 4000 5.5 (random large size, used to cause the function to converge to a shallow curve of some sort)
+ // 10000 6.0 (similar to above)
+ //
+ // I achieve the minimization using the following Mathematica code:
+ //
+ // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d
+ //
+ // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}}
+ //
+ // solution =
+ // Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples),
+ // {a, b, c, d}][[2]]
+ //
+ // And the code below (to initialize a, b, c, d) is generated by:
+ //
+ // Print["const double " <> ToString[#[[1]]] <> " = " <>
+ // If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution
+ //
+ // We've long known the following to be true:
+ // - Small code blocks are cheap to optimize and so we should do it sooner rather
+ // than later.
+ // - Large code blocks are expensive to optimize and so we should postpone doing so,
+ // and sometimes have a large enough threshold that we never optimize them.
+ // - The difference in cost is not totally linear because (a) just invoking the
+ // DFG incurs some base cost and (b) for large code blocks there is enough slop
+ // in the correlation between instruction count and the actual compilation cost
+ // that for those large blocks, the instruction count should not have a strong
+ // influence on our threshold.
+ //
+ // I knew the goals but I didn't know how to achieve them; so I picked an interesting
+ // example where the heuristics were right (code block in 3d-cube with instruction
+ // count 320, which got compiled early as it should have been) and one where they were
+ // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive
+ // to compile and didn't run often enough to warrant compilation in my opinion), and
+ // then threw in additional data points that represented my own guess of what our
+ // heuristics should do for some round-numbered examples.
+ //
+ // The expression to which I decided to fit the data arose because I started with an
+ // affine function, and then did two things: I put the linear part in an Abs to ensure
+ // that the fit didn't end up choosing a negative value of c (which would result in
+ // the function turning over and going negative for large x), and I threw in a Sqrt
+ // term because Sqrt represents my intuition that the function should be more sensitive
+ // to small changes in small values of x, but less sensitive when x gets large.
+
+ // Note that the current fit essentially eliminates the linear portion of the
+ // expression (c == 0.0).
+ const double a = 0.061504;
+ const double b = 1.02406;
+ const double c = 0.0;
+ const double d = 0.825914;
+
+ double instructionCount = this->instructionCount();
+
+ ASSERT(instructionCount); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
+
+ double result = d + a * sqrt(instructionCount + b) + c * instructionCount;
+
+ result *= codeTypeThresholdMultiplier();
+
+ if (Options::verboseOSR()) {
+ dataLog(
+ *this, ": instruction count is ", instructionCount,
+ ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
+ "\n");
+ }
+ return result;
+}
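+
+// A rough reading of the fitted curve above (illustrative arithmetic only, not
+// additional data points): for a function code block, whose multiplier is 1,
+// the scaling factor comes out to about 1.44 at 100 instructions and about
+// 2.77 at 1000 instructions. A 10x larger code block thus only roughly doubles
+// the threshold scaling, which matches the intent that the threshold grow
+// sublinearly with code block size.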
+
+static int32_t clipThreshold(double threshold)
+{
+ if (threshold < 1.0)
+ return 1;
+
+ if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max()))
+ return std::numeric_limits<int32_t>::max();
+
+ return static_cast<int32_t>(threshold);
+}
+
+int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold)
+{
+ return clipThreshold(
+ static_cast<double>(desiredThreshold) *
+ optimizationThresholdScalingFactor() *
+ (1 << reoptimizationRetryCounter()));
+}
+
+bool CodeBlock::checkIfOptimizationThresholdReached()
+{
+#if ENABLE(DFG_JIT)
+ if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) {
+ if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode))
+ == DFG::Worklist::Compiled) {
+ optimizeNextInvocation();
+ return true;
+ }
+ }
+#endif
+
+ return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
+}
+
+void CodeBlock::optimizeNextInvocation()
+{
+ if (Options::verboseOSR())
+ dataLog(*this, ": Optimizing next invocation.\n");
+ m_jitExecuteCounter.setNewThreshold(0, this);
+}
+
+void CodeBlock::dontOptimizeAnytimeSoon()
+{
+ if (Options::verboseOSR())
+ dataLog(*this, ": Not optimizing anytime soon.\n");
+ m_jitExecuteCounter.deferIndefinitely();
+}
+
+void CodeBlock::optimizeAfterWarmUp()
+{
+ if (Options::verboseOSR())
+ dataLog(*this, ": Optimizing after warm-up.\n");
+#if ENABLE(DFG_JIT)
+ m_jitExecuteCounter.setNewThreshold(
+ adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this);
+#endif
+}
+
+void CodeBlock::optimizeAfterLongWarmUp()
+{
+ if (Options::verboseOSR())
+ dataLog(*this, ": Optimizing after long warm-up.\n");
+#if ENABLE(DFG_JIT)
+ m_jitExecuteCounter.setNewThreshold(
+ adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this);
+#endif
+}
+
+void CodeBlock::optimizeSoon()
+{
+ if (Options::verboseOSR())
+ dataLog(*this, ": Optimizing soon.\n");
+#if ENABLE(DFG_JIT)
+ m_jitExecuteCounter.setNewThreshold(
+ adjustedCounterValue(Options::thresholdForOptimizeSoon()), this);
+#endif
+}
+
+void CodeBlock::forceOptimizationSlowPathConcurrently()
+{
+ if (Options::verboseOSR())
+ dataLog(*this, ": Forcing slow path concurrently.\n");
+ m_jitExecuteCounter.forceSlowPathConcurrently();
+}
+
+#if ENABLE(DFG_JIT)
+void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
+{
+ JITCode::JITType type = jitType();
+ if (type != JITCode::BaselineJIT) {
+ dataLog(*this, ": expected to have baseline code but have ", type, "\n");
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ CodeBlock* theReplacement = replacement();
+ if ((result == CompilationSuccessful) != (theReplacement != this)) {
+ dataLog(*this, ": we have result = ", result, " but ");
+ if (theReplacement == this)
+ dataLog("we are our own replacement.\n");
+ else
+ dataLog("our replacement is ", pointerDump(theReplacement), "\n");
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ switch (result) {
+ case CompilationSuccessful:
+ RELEASE_ASSERT(JITCode::isOptimizingJIT(replacement()->jitType()));
+ optimizeNextInvocation();
+ return;
+ case CompilationFailed:
+ dontOptimizeAnytimeSoon();
+ return;
+ case CompilationDeferred:
+ // We'd like to do dontOptimizeAnytimeSoon() but we cannot because
+ // forceOptimizationSlowPathConcurrently() is inherently racy. It won't
+ // necessarily guarantee anything. So, we make sure that even if that
+ // function ends up being a no-op, we still eventually retry and realize
+ // that we have optimized code ready.
+ optimizeAfterWarmUp();
+ return;
+ case CompilationInvalidated:
+ // Retry with exponential backoff.
+ countReoptimization();
+ optimizeAfterWarmUp();
+ return;
+ }
+
+ dataLog("Unrecognized result: ", static_cast<int>(result), "\n");
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+#endif
+
+uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold)
+{
+ ASSERT(JITCode::isOptimizingJIT(jitType()));
+ // Compute this the lame way so we don't saturate. This is called infrequently
+ // enough that this loop won't hurt us.
+ unsigned result = desiredThreshold;
+ for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
+ unsigned newResult = result << 1;
+ if (newResult < result)
+ return std::numeric_limits<uint32_t>::max();
+ result = newResult;
+ }
+ return result;
+}
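+
+// For example (illustrative numbers only): a desired threshold of 100 combined
+// with a baseline reoptimization retry counter of 3 yields 100 << 3 == 800; if
+// the repeated doubling would overflow, the loop returns UINT32_MAX instead of
+// wrapping.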
+
+uint32_t CodeBlock::exitCountThresholdForReoptimization()
+{
+ return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier());
+}
+
+uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop()
+{
+ return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier());
+}
+
+bool CodeBlock::shouldReoptimizeNow()
+{
+ return osrExitCounter() >= exitCountThresholdForReoptimization();
+}
+
+bool CodeBlock::shouldReoptimizeFromLoopNow()
+{
+ return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
+}
+#endif
+
+ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
+{
+ for (unsigned i = 0; i < m_arrayProfiles.size(); ++i) {
+ if (m_arrayProfiles[i].bytecodeOffset() == bytecodeOffset)
+ return &m_arrayProfiles[i];
+ }
+ return 0;
+}
+
+ArrayProfile* CodeBlock::getOrAddArrayProfile(unsigned bytecodeOffset)
+{
+ ArrayProfile* result = getArrayProfile(bytecodeOffset);
+ if (result)
+ return result;
+ return addArrayProfile(bytecodeOffset);
+}
+
+void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
+{
+ ConcurrentJITLocker locker(m_lock);
+
+ numberOfLiveNonArgumentValueProfiles = 0;
+ numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
+ for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
+ ValueProfile* profile = getFromAllValueProfiles(i);
+ unsigned numSamples = profile->totalNumberOfSamples();
+ if (numSamples > ValueProfile::numberOfBuckets)
+ numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
+ numberOfSamplesInProfiles += numSamples;
+ if (profile->m_bytecodeOffset < 0) {
+ profile->computeUpdatedPrediction(locker);
+ continue;
+ }
+ if (profile->numberOfSamples() || profile->m_prediction != SpecNone)
+ numberOfLiveNonArgumentValueProfiles++;
+ profile->computeUpdatedPrediction(locker);
+ }
+
+#if ENABLE(DFG_JIT)
+ m_lazyOperandValueProfiles.computeUpdatedPredictions(locker);
+#endif
+}
+
+void CodeBlock::updateAllValueProfilePredictions()
+{
+ unsigned ignoredValue1, ignoredValue2;
+ updateAllPredictionsAndCountLiveness(ignoredValue1, ignoredValue2);
+}
+
+void CodeBlock::updateAllArrayPredictions()
+{
+ ConcurrentJITLocker locker(m_lock);
+
+ for (unsigned i = m_arrayProfiles.size(); i--;)
+ m_arrayProfiles[i].computeUpdatedPrediction(locker, this);
+
+ // Don't count these either, for similar reasons.
+ for (unsigned i = m_arrayAllocationProfiles.size(); i--;)
+ m_arrayAllocationProfiles[i].updateIndexingType();
+}
+
+void CodeBlock::updateAllPredictions()
+{
+ updateAllValueProfilePredictions();
+ updateAllArrayPredictions();
+}
+
+bool CodeBlock::shouldOptimizeNow()
+{
+ if (Options::verboseOSR())
+ dataLog("Considering optimizing ", *this, "...\n");
+
+ if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay())
+ return true;
+
+ updateAllArrayPredictions();
+
+ unsigned numberOfLiveNonArgumentValueProfiles;
+ unsigned numberOfSamplesInProfiles;
+ updateAllPredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles);
+
+ if (Options::verboseOSR()) {
+ dataLogF(
+ "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n",
+ (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles(),
+ numberOfLiveNonArgumentValueProfiles, numberOfValueProfiles(),
+ (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfValueProfiles(),
+ numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfValueProfiles());
+ }
+
+ if ((!numberOfValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles() >= Options::desiredProfileLivenessRate())
+ && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate())
+ && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay())
+ return true;
+
+ ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max());
+ m_optimizationDelayCounter++;
+ optimizeAfterWarmUp();
+ return false;
+}
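+
+// As an illustration of the check above (the rates here are hypothetical, not
+// necessarily the Options defaults): with 80 non-argument value profiles of
+// which 64 are live (a liveness rate of 0.8) and buckets that are 40% full on
+// average, desired rates of 0.75 and 0.35 would both be met, so the block is
+// deemed profiled well enough to optimize once the minimum delay has elapsed.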
+
+#if ENABLE(DFG_JIT)
+void CodeBlock::tallyFrequentExitSites()
+{
+ ASSERT(JITCode::isOptimizingJIT(jitType()));
+ ASSERT(alternative()->jitType() == JITCode::BaselineJIT);
+
+ CodeBlock* profiledBlock = alternative();
+
+ switch (jitType()) {
+ case JITCode::DFGJIT: {
+ DFG::JITCode* jitCode = m_jitCode->dfg();
+ for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
+ DFG::OSRExit& exit = jitCode->osrExit[i];
+
+ if (!exit.considerAddingAsFrequentExitSite(profiledBlock))
+ continue;
+ }
+ break;
+ }
+
+#if ENABLE(FTL_JIT)
+ case JITCode::FTLJIT: {
+ // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit
+ // vector contains a totally different type that just so happens to behave like
+ // DFG::JITCode::osrExit.
+ FTL::JITCode* jitCode = m_jitCode->ftl();
+ for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
+ FTL::OSRExit& exit = jitCode->osrExit[i];
+
+ if (!exit.considerAddingAsFrequentExitSite(profiledBlock))
+ continue;
+ }
+ break;
+ }
+#endif
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+}
+#endif // ENABLE(DFG_JIT)
+
+#if ENABLE(VERBOSE_VALUE_PROFILE)
+void CodeBlock::dumpValueProfiles()
+{
+ dataLog("ValueProfile for ", *this, ":\n");
+ for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
+ ValueProfile* profile = getFromAllValueProfiles(i);
+ if (profile->m_bytecodeOffset < 0) {
+ ASSERT(profile->m_bytecodeOffset == -1);
+ dataLogF(" arg = %u: ", i);
+ } else
+ dataLogF(" bc = %d: ", profile->m_bytecodeOffset);
+ if (!profile->numberOfSamples() && profile->m_prediction == SpecNone) {
+ dataLogF("<empty>\n");
+ continue;
+ }
+ profile->dump(WTF::dataFile());
+ dataLogF("\n");
+ }
+ dataLog("RareCaseProfile for ", *this, ":\n");
+ for (unsigned i = 0; i < numberOfRareCaseProfiles(); ++i) {
+ RareCaseProfile* profile = rareCaseProfile(i);
+ dataLogF(" bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
+ }
+ dataLog("SpecialFastCaseProfile for ", *this, ":\n");
+ for (unsigned i = 0; i < numberOfSpecialFastCaseProfiles(); ++i) {
+ RareCaseProfile* profile = specialFastCaseProfile(i);
+ dataLogF(" bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
+ }
+}
+#endif // ENABLE(VERBOSE_VALUE_PROFILE)
+
+unsigned CodeBlock::frameRegisterCount()
+{
+ switch (jitType()) {
+ case JITCode::InterpreterThunk:
+ return LLInt::frameRegisterCountFor(this);
+
+#if ENABLE(JIT)
+ case JITCode::BaselineJIT:
+ return JIT::frameRegisterCountFor(this);
+#endif // ENABLE(JIT)
+
+#if ENABLE(DFG_JIT)
+ case JITCode::DFGJIT:
+ case JITCode::FTLJIT:
+ return jitCode()->dfgCommon()->frameRegisterCount;
+#endif // ENABLE(DFG_JIT)
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return 0;
+ }
+}
+
+int CodeBlock::stackPointerOffset()
+{
+ return virtualRegisterForLocal(frameRegisterCount() - 1).offset();
+}
+
+size_t CodeBlock::predictedMachineCodeSize()
+{
+ // This will be called from CodeBlock::CodeBlock before either m_vm or the
+ // instructions have been initialized. It's OK to return 0 because what will really
+ // matter is the recomputation of this value when the slow path is triggered.
+ if (!m_vm)
+ return 0;
+
+ if (!m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT)
+ return 0; // It's as good of a prediction as we'll get.
+
+ // Be conservative: return a size that will be an overestimation about 84% of the
+ // time (one standard deviation above the mean lands at roughly the 84th percentile
+ // of a normal distribution).
+ double multiplier = m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.mean() +
+ m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.standardDeviation();
+
+ // Be paranoid: silently reject bogus multipliers. Silently doing the "wrong" thing
+ // here is OK, since this whole method is just a heuristic.
+ if (multiplier < 0 || multiplier > 1000)
+ return 0;
+
+ double doubleResult = multiplier * m_instructions.size();
+
+ // Be even more paranoid: silently reject values that won't fit into a size_t. If
+ // the function is so huge that we can't even fit it into virtual memory then we
+ // should probably have some other guards in place to prevent us from even getting
+ // to this point.
+ if (doubleResult > std::numeric_limits<size_t>::max())
+ return 0;
+
+ return static_cast<size_t>(doubleResult);
+}
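+
+// For instance (made-up statistics): with a measured mean of 40 machine-code
+// bytes per bytecode word and a standard deviation of 10, the multiplier is 50,
+// so a code block with 500 instruction words predicts 50 * 500 == 25000 bytes.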
+
+bool CodeBlock::usesOpcode(OpcodeID opcodeID)
+{
+ Interpreter* interpreter = vm()->interpreter;
+ Instruction* instructionsBegin = instructions().begin();
+ unsigned instructionCount = instructions().size();
+
+ for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount; ) {
+ switch (interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode)) {
+#define DEFINE_OP(curOpcode, length) \
+ case curOpcode: \
+ if (curOpcode == opcodeID) \
+ return true; \
+ bytecodeOffset += length; \
+ break;
+ FOR_EACH_OPCODE_ID(DEFINE_OP)
+#undef DEFINE_OP
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ }
+
+ return false;
+}
+
+String CodeBlock::nameForRegister(VirtualRegister virtualRegister)
+{
+ ConcurrentJITLocker locker(symbolTable()->m_lock);
+ SymbolTable::Map::iterator end = symbolTable()->end(locker);
+ for (SymbolTable::Map::iterator ptr = symbolTable()->begin(locker); ptr != end; ++ptr) {
+ if (ptr->value.getIndex() == virtualRegister.offset()) {
+ // FIXME: This won't work from the compilation thread.
+ // https://bugs.webkit.org/show_bug.cgi?id=115300
+ return String(ptr->key);
+ }
+ }
+ if (needsActivation() && virtualRegister == activationRegister())
+ return ASCIILiteral("activation");
+ if (virtualRegister == thisRegister())
+ return ASCIILiteral("this");
+ if (usesArguments()) {
+ if (virtualRegister == argumentsRegister())
+ return ASCIILiteral("arguments");
+ if (unmodifiedArgumentsRegister(argumentsRegister()) == virtualRegister)
+ return ASCIILiteral("real arguments");
+ }
+ if (virtualRegister.isArgument())
+ return String::format("arguments[%3d]", virtualRegister.toArgument()).impl();
+
+ return "";
+}
+
+namespace {
+
+struct VerifyCapturedDef {
+ void operator()(CodeBlock* codeBlock, Instruction* instruction, OpcodeID opcodeID, int operand)
+ {
+ unsigned bytecodeOffset = instruction - codeBlock->instructions().begin();
+
+ if (codeBlock->isConstantRegisterIndex(operand)) {
+ codeBlock->beginValidationDidFail();
+ dataLog(" At bc#", bytecodeOffset, " encountered a definition of a constant.\n");
+ codeBlock->endValidationDidFail();
+ return;
+ }
+
+ switch (opcodeID) {
+ case op_enter:
+ case op_captured_mov:
+ case op_init_lazy_reg:
+ case op_create_arguments:
+ case op_new_captured_func:
+ return;
+ default:
+ break;
+ }
+
+ VirtualRegister virtualReg(operand);
+ if (!virtualReg.isLocal())
+ return;
+
+ if (codeBlock->captureCount() && codeBlock->symbolTable()->isCaptured(operand)) {
+ codeBlock->beginValidationDidFail();
+ dataLog(" At bc#", bytecodeOffset, " encountered invalid assignment to captured variable loc", virtualReg.toLocal(), ".\n");
+ codeBlock->endValidationDidFail();
+ return;
+ }
+
+ return;
+ }
+};
+
+} // anonymous namespace
+
+void CodeBlock::validate()
+{
+ BytecodeLivenessAnalysis liveness(this); // Compute directly from scratch so it doesn't affect CodeBlock footprint.
+
+ FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeOffset(0);
+
+ if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeRegisters)) {
+ beginValidationDidFail();
+ dataLog(" Wrong number of bits in result!\n");
+ dataLog(" Result: ", liveAtHead, "\n");
+ dataLog(" Bit count: ", liveAtHead.numBits(), "\n");
+ endValidationDidFail();
+ }
+
+ for (unsigned i = m_numCalleeRegisters; i--;) {
+ bool isCaptured = false;
+ VirtualRegister reg = virtualRegisterForLocal(i);
+
+ if (captureCount())
+ isCaptured = reg.offset() <= captureStart() && reg.offset() > captureEnd();
+
+ if (isCaptured) {
+ if (!liveAtHead.get(i)) {
+ beginValidationDidFail();
+ dataLog(" Variable loc", i, " is expected to be live because it is captured, but it isn't live.\n");
+ dataLog(" Result: ", liveAtHead, "\n");
+ endValidationDidFail();
+ }
+ } else {
+ if (liveAtHead.get(i)) {
+ beginValidationDidFail();
+ dataLog(" Variable loc", i, " is expected to be dead.\n");
+ dataLog(" Result: ", liveAtHead, "\n");
+ endValidationDidFail();
+ }
+ }
+ }
+
+ for (unsigned bytecodeOffset = 0; bytecodeOffset < instructions().size();) {
+ Instruction* currentInstruction = instructions().begin() + bytecodeOffset;
+ OpcodeID opcodeID = m_vm->interpreter->getOpcodeID(currentInstruction->u.opcode);
+
+ VerifyCapturedDef verifyCapturedDef;
+ computeDefsForBytecodeOffset(this, bytecodeOffset, verifyCapturedDef);
+
+ bytecodeOffset += opcodeLength(opcodeID);
+ }
+}
+
+void CodeBlock::beginValidationDidFail()
+{
+ dataLog("Validation failure in ", *this, ":\n");
+ dataLog("\n");
+}
+
+void CodeBlock::endValidationDidFail()
+{
+ dataLog("\n");
+ dumpBytecode();
+ dataLog("\n");
+ dataLog("Validation failure.\n");
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+void CodeBlock::addBreakpoint(unsigned numBreakpoints)
+{
+ m_numBreakpoints += numBreakpoints;
+ ASSERT(m_numBreakpoints);
+ if (JITCode::isOptimizingJIT(jitType()))
+ jettison(Profiler::JettisonDueToDebuggerBreakpoint);
+}
+
+void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode)
+{
+ m_steppingMode = mode;
+ if (mode == SteppingModeEnabled && JITCode::isOptimizingJIT(jitType()))
+ jettison(Profiler::JettisonDueToDebuggerStepping);
+}
+
+RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(int bytecodeOffset)
+{
+ return tryBinarySearch<RareCaseProfile, int>(
+ m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
+ getRareCaseProfileBytecodeOffset);
+}
+
+#if ENABLE(JIT)
+DFG::CapabilityLevel CodeBlock::capabilityLevel()
+{
+ DFG::CapabilityLevel result = capabilityLevelInternal();
+ m_capabilityLevelState = result;
+ return result;
+}
+#endif
+