+
+ ConcurrentJITLocker locker(m_lock);
+
+ RepatchBuffer repatchBuffer(this);
+ resetStubInternal(repatchBuffer, stubInfo);
+}
+
+void CodeBlock::resetStubInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
+{
+ AccessType accessType = static_cast<AccessType>(stubInfo.accessType);
+
+ if (Options::verboseOSR()) {
+ // This can be called while the GC is running destructors, so we don't try to do a
+ // full dump of the CodeBlock.
+ dataLog("Clearing structure cache (kind ", static_cast<int>(stubInfo.accessType), ") in ", RawPointer(this), ".\n");
+ }
+
+ RELEASE_ASSERT(JITCode::isJIT(jitType()));
+
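+ // Dispatch on the access type recorded when the stub was generated; each resetter
+ // repatches its inline cache back to the unlinked state.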
+ if (isGetByIdAccess(accessType))
+ resetGetByID(repatchBuffer, stubInfo);
+ else if (isPutByIdAccess(accessType))
+ resetPutByID(repatchBuffer, stubInfo);
+ else {
+ RELEASE_ASSERT(isInAccess(accessType));
+ resetIn(repatchBuffer, stubInfo);
+ }
+
+ stubInfo.reset();
+}
+
+void CodeBlock::resetStubDuringGCInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
+{
+ resetStubInternal(repatchBuffer, stubInfo);
+ stubInfo.resetByGC = true;
+}
+
+CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
+{
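+ // Linear search for the call link info created at the given bytecode index.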
+ for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
+ if ((*iter)->codeOrigin == CodeOrigin(index))
+ return *iter;
+ }
+ return nullptr;
+}
+#endif
+
+void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor)
+{
+ visitor.append(&m_globalObject);
+ visitor.append(&m_ownerExecutable);
+ visitor.append(&m_symbolTable);
+ visitor.append(&m_unlinkedCode);
+ if (m_rareData)
+ m_rareData->m_evalCodeCache.visitAggregate(visitor);
+ visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
+ for (size_t i = 0; i < m_functionExprs.size(); ++i)
+ visitor.append(&m_functionExprs[i]);
+ for (size_t i = 0; i < m_functionDecls.size(); ++i)
+ visitor.append(&m_functionDecls[i]);
+ for (unsigned i = 0; i < m_objectAllocationProfiles.size(); ++i)
+ m_objectAllocationProfiles[i].visitAggregate(visitor);
+
+#if ENABLE(DFG_JIT)
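+ // Optimized code holds additional strong references, such as the metadata
+ // describing inlined call frames.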
+ if (JITCode::isOptimizingJIT(jitType())) {
+ DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
+ if (dfgCommon->inlineCallFrames.get())
+ dfgCommon->inlineCallFrames->visitAggregate(visitor);
+ }
+#endif
+
+ updateAllPredictions();
+}
+
+void CodeBlock::stronglyVisitWeakReferences(SlotVisitor& visitor)
+{
+ UNUSED_PARAM(visitor);
+
+#if ENABLE(DFG_JIT)
+ if (!JITCode::isOptimizingJIT(jitType()))
+ return;
+
+ DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
+
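+ // These are things the DFG otherwise references only weakly; visiting them
+ // strongly here presumably means a caller has decided this block must stay
+ // fully alive.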
+ for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
+ if (!!dfgCommon->transitions[i].m_codeOrigin)
+ visitor.append(&dfgCommon->transitions[i].m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
+ visitor.append(&dfgCommon->transitions[i].m_from);
+ visitor.append(&dfgCommon->transitions[i].m_to);
+ }
+
+ for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i)
+ visitor.append(&dfgCommon->weakReferences[i]);
+#endif
+}
+
+CodeBlock* CodeBlock::baselineAlternative()
+{
+#if ENABLE(JIT)
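+ // Walk down the chain of alternatives; the bottom of the chain is the baseline
+ // (or not-yet-compiled) CodeBlock, as the assertions below check.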
+ CodeBlock* result = this;
+ while (result->alternative())
+ result = result->alternative();
+ RELEASE_ASSERT(result);
+ RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITCode::None);
+ return result;
+#else
+ return this;
+#endif
+}
+
+CodeBlock* CodeBlock::baselineVersion()
+{
+#if ENABLE(JIT)
+ if (JITCode::isBaselineCode(jitType()))
+ return this;
+ CodeBlock* result = replacement();
+ if (!result) {
+ // This can happen if we're creating the original CodeBlock for an executable.
+ // Assume that we're the baseline CodeBlock.
+ RELEASE_ASSERT(jitType() == JITCode::None);
+ return this;
+ }
+ result = result->baselineAlternative();
+ return result;
+#else
+ return this;
+#endif
+}
+
+#if ENABLE(JIT)
+bool CodeBlock::hasOptimizedReplacement(JITCode::JITType typeToReplace)
+{
+ return JITCode::isHigherTier(replacement()->jitType(), typeToReplace);
+}
+
+bool CodeBlock::hasOptimizedReplacement()
+{
+ return hasOptimizedReplacement(jitType());
+}
+#endif
+
+bool CodeBlock::isCaptured(VirtualRegister operand, InlineCallFrame* inlineCallFrame) const
+{
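+ // Arguments other than |this| (argument 0) are captured whenever the arguments
+ // object is in use, since it aliases them.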
+ if (operand.isArgument())
+ return operand.toArgument() && usesArguments();
+
+ if (inlineCallFrame)
+ return inlineCallFrame->capturedVars.get(operand.toLocal());
+
+ // The activation object isn't in the captured region, but it's "captured"
+ // in the sense that stores to its location can be observed indirectly.
+ if (needsActivation() && operand == activationRegister())
+ return true;
+
+ // Ditto for the arguments object.
+ if (usesArguments() && operand == argumentsRegister())
+ return true;
+ if (usesArguments() && operand == unmodifiedArgumentsRegister(argumentsRegister()))
+ return true;
+
+ // We're in global code, so there are no locals to capture.
+ if (!symbolTable())
+ return false;
+
+ return symbolTable()->isCaptured(operand.offset());
+}
+
+int CodeBlock::framePointerOffsetToGetActivationRegisters(int machineCaptureStart)
+{
+ // We'll be adding this to the frame pointer to get a registers pointer that looks
+ // like it would have looked in the baseline engine. For example, if bytecode would
+ // have put the first captured variable at offset -5 but we put it at offset -1, then
+ // we'll have an offset of 4.
+ int32_t offset = 0;
+
+ // Compute where we put the captured variables. This offset will point the registers
+ // pointer directly at the first captured var.
+ offset += machineCaptureStart;
+
+ // Now compute the offset needed to make the runtime see the captured variables at the
+ // same offset that the bytecode would have used.
+ offset -= symbolTable()->captureStart();
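+ // With the numbers from the example above: offset = -1 - (-5) = 4.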
+
+ return offset;
+}
+
+int CodeBlock::framePointerOffsetToGetActivationRegisters()
+{
+ if (!JITCode::isOptimizingJIT(jitType()))
+ return 0;
+#if ENABLE(DFG_JIT)
+ return framePointerOffsetToGetActivationRegisters(jitCode()->dfgCommon()->machineCaptureStart);
+#else
+ RELEASE_ASSERT_NOT_REACHED();
+ return 0;
+#endif
+}
+
+HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset)
+{
+ RELEASE_ASSERT(bytecodeOffset < instructions().size());
+
+ if (!m_rareData)
+ return nullptr;
+
+ Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
+ for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
+ // Handlers are ordered innermost first, so the first handler we encounter
+ // that contains the source address is the correct handler to use.
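+ // A handler covers the half-open bytecode range [start, end).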
+ if (exceptionHandlers[i].start <= bytecodeOffset && exceptionHandlers[i].end > bytecodeOffset)
+ return &exceptionHandlers[i];
+ }
+
+ return nullptr;
+}
+
+unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
+{
+ RELEASE_ASSERT(bytecodeOffset < instructions().size());
+ return m_ownerExecutable->lineNo() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
+}
+
+unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
+{
+ int divot;
+ int startOffset;
+ int endOffset;
+ unsigned line;
+ unsigned column;
+ expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
+ return column;
+}
+
+void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column)
+{
+ m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
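+ // The unlinked code reports positions relative to this block's source; rebase the
+ // divot, column, and line onto the owning executable. Only columns on the first
+ // line need the first-line column offset.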
+ divot += m_sourceOffset;
+ column += line ? 1 : firstLineColumnOffset();
+ line += m_ownerExecutable->lineNo();
+}
+
+bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
+{
+ Interpreter* interpreter = vm()->interpreter;
+ const Instruction* begin = instructions().begin();
+ const Instruction* end = instructions().end();
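+ // Walk the variable-length instruction stream, advancing by each opcode's length.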
+ for (const Instruction* it = begin; it != end;) {
+ OpcodeID opcodeID = interpreter->getOpcodeID(it->u.opcode);
+ if (opcodeID == op_debug) {
+ unsigned bytecodeOffset = it - begin;
+ int unused;
+ unsigned opDebugLine;
+ unsigned opDebugColumn;
+ expressionRangeForBytecodeOffset(bytecodeOffset, unused, unused, unused, opDebugLine, opDebugColumn);
+ if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn))
+ return true;
+ }
+ it += opcodeLengths[opcodeID];
+ }
+ return false;
+}
+
+void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
+{
+ m_rareCaseProfiles.shrinkToFit();
+ m_specialFastCaseProfiles.shrinkToFit();
+
+ if (shrinkMode == EarlyShrink) {
+ m_constantRegisters.shrinkToFit();
+
+ if (m_rareData) {
+ m_rareData->m_switchJumpTables.shrinkToFit();
+ m_rareData->m_stringSwitchJumpTables.shrinkToFit();
+ }
+ } // else don't shrink these, because pointers into these tables may already have been handed out.
+}
+
+unsigned CodeBlock::addOrFindConstant(JSValue v)
+{
+ unsigned result;
+ if (findConstant(v, result))
+ return result;
+ return addConstant(v);
+}
+
+bool CodeBlock::findConstant(JSValue v, unsigned& index)
+{
+ unsigned numberOfConstants = numberOfConstantRegisters();
+ for (unsigned i = 0; i < numberOfConstants; ++i) {
+ if (getConstant(FirstConstantRegisterIndex + i) == v) {
+ index = i;
+ return true;
+ }
+ }
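+ // Not found; report the index the value would occupy if appended next.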
+ index = numberOfConstants;
+ return false;
+}
+
+#if ENABLE(JIT)
+void CodeBlock::unlinkCalls()
+{
+ if (!!m_alternative)
+ m_alternative->unlinkCalls();
+ for (size_t i = 0; i < m_llintCallLinkInfos.size(); ++i) {
+ if (m_llintCallLinkInfos[i].isLinked())
+ m_llintCallLinkInfos[i].unlink();
+ }
+ if (m_callLinkInfos.isEmpty())
+ return;
+ if (!m_vm->canUseJIT())
+ return;
+ RepatchBuffer repatchBuffer(this);
+ for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
+ CallLinkInfo& info = **iter;
+ if (!info.isLinked())
+ continue;
+ info.unlink(repatchBuffer);
+ }
+}
+
+void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
+{
+ noticeIncomingCall(callerFrame);
+ m_incomingCalls.push(incoming);
+}
+#endif // ENABLE(JIT)
+
+void CodeBlock::unlinkIncomingCalls()
+{
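+ // Each unlink() removes the node from its list, so draining from begin() terminates.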
+ while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
+ m_incomingLLIntCalls.begin()->unlink();
+#if ENABLE(JIT)
+ if (m_incomingCalls.isEmpty())
+ return;
+ RepatchBuffer repatchBuffer(this);
+ while (m_incomingCalls.begin() != m_incomingCalls.end())
+ m_incomingCalls.begin()->unlink(repatchBuffer);
+#endif // ENABLE(JIT)
+}
+
+void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
+{
+ noticeIncomingCall(callerFrame);
+ m_incomingLLIntCalls.push(incoming);
+}
+
+void CodeBlock::clearEvalCache()
+{
+ if (!!m_alternative)
+ m_alternative->clearEvalCache();
+ if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
+ otherBlock->clearEvalCache();
+ if (!m_rareData)
+ return;
+ m_rareData->m_evalCodeCache.clear();
+}
+
+void CodeBlock::install()
+{
+ ownerExecutable()->installCode(this);
+}
+
+PassRefPtr<CodeBlock> CodeBlock::newReplacement()
+{
+ return ownerExecutable()->newReplacementCodeBlockFor(specializationKind());
+}
+
+const SlowArgument* CodeBlock::machineSlowArguments()
+{
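+ // Baseline and LLInt code use the symbol table's layout for slow arguments; an
+ // optimizing JIT records its own machine layout in the DFG common data.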
+ if (!JITCode::isOptimizingJIT(jitType()))
+ return symbolTable()->slowArguments();
+
+#if ENABLE(DFG_JIT)
+ return jitCode()->dfgCommon()->slowArguments.get();
+#else // ENABLE(DFG_JIT)
+ return nullptr;
+#endif // ENABLE(DFG_JIT)
+}
+
+#if ENABLE(JIT)
+CodeBlock* ProgramCodeBlock::replacement()
+{
+ return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
+}
+
+CodeBlock* EvalCodeBlock::replacement()
+{
+ return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
+}
+
+CodeBlock* FunctionCodeBlock::replacement()
+{
+ return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
+}
+
+DFG::CapabilityLevel ProgramCodeBlock::capabilityLevelInternal()
+{
+ return DFG::programCapabilityLevel(this);
+}
+
+DFG::CapabilityLevel EvalCodeBlock::capabilityLevelInternal()
+{
+ return DFG::evalCapabilityLevel(this);
+}
+
+DFG::CapabilityLevel FunctionCodeBlock::capabilityLevelInternal()
+{
+ if (m_isConstructor)
+ return DFG::functionForConstructCapabilityLevel(this);
+ return DFG::functionForCallCapabilityLevel(this);
+}
+#endif
+
+void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode)
+{
+ RELEASE_ASSERT(reason != Profiler::NotJettisoned);
+
+#if ENABLE(DFG_JIT)
+ if (DFG::shouldShowDisassembly()) {
+ dataLog("Jettisoning ", *this);
+ if (mode == CountReoptimization)
+ dataLog(" and counting reoptimization");
+ dataLog(" due to ", reason, ".\n");