+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+}
+#endif // ENABLE(DFG_JIT)
+
+#if ENABLE(VERBOSE_VALUE_PROFILE)
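+// Debugging aid: dumps every value profile, rare-case profile, and special-fast-case profile recorded for this code block.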
+void CodeBlock::dumpValueProfiles()
+{
+ dataLog("ValueProfile for ", *this, ":\n");
+ for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
+ ValueProfile* profile = getFromAllValueProfiles(i);
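+ // A negative bytecode offset (always -1) marks an argument profile rather than a profile tied to a bytecode instruction.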
+ if (profile->m_bytecodeOffset < 0) {
+ ASSERT(profile->m_bytecodeOffset == -1);
+ dataLogF(" arg = %u: ", i);
+ } else
+ dataLogF(" bc = %d: ", profile->m_bytecodeOffset);
+ if (!profile->numberOfSamples() && profile->m_prediction == SpecNone) {
+ dataLogF("<empty>\n");
+ continue;
+ }
+ profile->dump(WTF::dataFile());
+ dataLogF("\n");
+ }
+ dataLog("RareCaseProfile for ", *this, ":\n");
+ for (unsigned i = 0; i < numberOfRareCaseProfiles(); ++i) {
+ RareCaseProfile* profile = rareCaseProfile(i);
+ dataLogF(" bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
+ }
+ dataLog("SpecialFastCaseProfile for ", *this, ":\n");
+ for (unsigned i = 0; i < numberOfSpecialFastCaseProfiles(); ++i) {
+ RareCaseProfile* profile = specialFastCaseProfile(i);
+ dataLogF(" bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
+ }
+}
+#endif // ENABLE(VERBOSE_VALUE_PROFILE)
+
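+// Number of registers this code block's stack frame needs, as reported by whichever tier produced the current JITCode (LLInt, baseline JIT, or DFG/FTL).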
+unsigned CodeBlock::frameRegisterCount()
+{
+ switch (jitType()) {
+ case JITCode::InterpreterThunk:
+ return LLInt::frameRegisterCountFor(this);
+
+#if ENABLE(JIT)
+ case JITCode::BaselineJIT:
+ return JIT::frameRegisterCountFor(this);
+#endif // ENABLE(JIT)
+
+#if ENABLE(DFG_JIT)
+ case JITCode::DFGJIT:
+ case JITCode::FTLJIT:
+ return jitCode()->dfgCommon()->frameRegisterCount;
+#endif // ENABLE(DFG_JIT)
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return 0;
+ }
+}
+
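+// Where the stack pointer sits relative to the call frame, expressed as the virtual register offset of the last (highest-numbered) local.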
+int CodeBlock::stackPointerOffset()
+{
+ return virtualRegisterForLocal(frameRegisterCount() - 1).offset();
+}
+
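+// Heuristic estimate of how much machine code the baseline JIT will emit for this block, based on the VM's running bytes-per-bytecode-word statistics.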
+size_t CodeBlock::predictedMachineCodeSize()
+{
+ // This will be called from CodeBlock::CodeBlock before either m_vm or the
+ // instructions have been initialized. It's OK to return 0 because what will really
+ // matter is the recomputation of this value when the slow path is triggered.
+ if (!m_vm)
+ return 0;
+
+ if (!m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT)
+ return 0; // It's as good a prediction as we'll get.
+
+ // Be conservative: return a size that will be an overestimation 84% of the time,
+ // since the mean plus one standard deviation is roughly the 84th percentile when the
+ // bytes-per-bytecode-word samples are normally distributed.
+ double multiplier = m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.mean() +
+ m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.standardDeviation();
+
+ // Be paranoid: silently reject bogus multipliers. Silently doing the "wrong" thing
+ // here is OK, since this whole method is just a heuristic.
+ if (multiplier < 0 || multiplier > 1000)
+ return 0;
+
+ double doubleResult = multiplier * m_instructions.size();
+
+ // Be even more paranoid: silently reject values that won't fit into a size_t. If
+ // the function is so huge that we can't even fit it into virtual memory, then we
+ // should probably have some other guards in place to prevent us from even getting
+ // to this point.
+ if (doubleResult > std::numeric_limits<size_t>::max())
+ return 0;
+
+ return static_cast<size_t>(doubleResult);
+}
+
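+// Walks the bytecode stream, advancing by each instruction's length, and reports whether any instruction uses the given opcode.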
+bool CodeBlock::usesOpcode(OpcodeID opcodeID)
+{
+ Interpreter* interpreter = vm()->interpreter;
+ Instruction* instructionsBegin = instructions().begin();
+ unsigned instructionCount = instructions().size();
+
+ for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount; ) {
+ switch (interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode)) {
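+ // FOR_EACH_OPCODE_ID expands DEFINE_OP into one case per opcode; each case checks for a match and then advances by that opcode's length.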
+#define DEFINE_OP(curOpcode, length) \
+ case curOpcode: \
+ if (curOpcode == opcodeID) \
+ return true; \
+ bytecodeOffset += length; \
+ break;
+ FOR_EACH_OPCODE_ID(DEFINE_OP)
+#undef DEFINE_OP
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ }
+
+ return false;
+}
+
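+// Best-effort mapping from a virtual register back to a human-readable name (a symbol table entry or one of the special registers), for debugging output.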
+String CodeBlock::nameForRegister(VirtualRegister virtualRegister)
+{
+ ConcurrentJITLocker locker(symbolTable()->m_lock);
+ SymbolTable::Map::iterator end = symbolTable()->end(locker);
+ for (SymbolTable::Map::iterator ptr = symbolTable()->begin(locker); ptr != end; ++ptr) {
+ if (ptr->value.getIndex() == virtualRegister.offset()) {
+ // FIXME: This won't work from the compilation thread.
+ // https://bugs.webkit.org/show_bug.cgi?id=115300
+ return String(ptr->key);
+ }
+ }
+ if (needsActivation() && virtualRegister == activationRegister())
+ return ASCIILiteral("activation");
+ if (virtualRegister == thisRegister())
+ return ASCIILiteral("this");
+ if (usesArguments()) {
+ if (virtualRegister == argumentsRegister())
+ return ASCIILiteral("arguments");
+ if (unmodifiedArgumentsRegister(argumentsRegister()) == virtualRegister)
+ return ASCIILiteral("real arguments");