+ break;
+ }
+ case ProfileTypeBytecodePutToLocalScope:
+ case ProfileTypeBytecodeGetFromLocalScope: {
+ const Identifier& ident = identifier(pc[4].u.operand);
+ symbolTable = m_symbolTable.get();
+ ConcurrentJITLocker locker(symbolTable->m_lock);
+ // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
+ symbolTable->prepareForTypeProfiling(locker);
+ globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), *vm());
+ globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), *vm());
+
+ break;
+ }
+
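+        // This profiling site refers to a variable at a known offset in this code block's
+        // symbol table; derive its unique ID and global type set from that offset.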
+ case ProfileTypeBytecodeHasGlobalID: {
+ symbolTable = m_symbolTable.get();
+ ConcurrentJITLocker locker(symbolTable->m_lock);
+ globalVariableID = symbolTable->uniqueIDForOffset(locker, VarOffset(profileRegister), *vm());
+ globalTypeSet = symbolTable->globalTypeSetForOffset(locker, VarOffset(profileRegister), *vm());
+ break;
+ }
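+        // No symbol table entry backs these profiling sites, so there is no global
+        // variable ID to hand to the type profiler.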
+ case ProfileTypeBytecodeDoesNotHaveGlobalID:
+ case ProfileTypeBytecodeFunctionArgument: {
+ globalVariableID = TypeProfilerNoGlobalIDExists;
+ break;
+ }
+ case ProfileTypeBytecodeFunctionReturnStatement: {
+ RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
+ globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
+ globalVariableID = TypeProfilerReturnStatement;
+ if (!shouldAnalyze) {
+                // Because a return statement can be added implicitly to return undefined at the end of a function,
+                // and these implicit nodes don't emit expression ranges (they aren't in the actual source text of
+                // the user's program), give the type profiler some range with which to identify these return statements.
+                // Currently, the text offset used for identification is that of the function's opening brace,
+                // and it is stored in TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
+ divotStart = divotEnd = m_sourceOffset;
+ shouldAnalyze = true;
+ }
+ break;
+ }
+ }
+
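+        // getTypeLocation returns the cached TypeLocation for this (variable ID, source ID, divot range)
+        // if one already exists; only locations that are both new and flagged for analysis are
+        // registered with the type profiler below.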
+ std::pair<TypeLocation*, bool> locationPair = vm()->typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
+ m_ownerExecutable->sourceID(), divotStart, divotEnd, globalTypeSet, vm());
+ TypeLocation* location = locationPair.first;
+ bool isNewLocation = locationPair.second;
+
+ if (flag == ProfileTypeBytecodeFunctionReturnStatement)
+ location->m_divotForFunctionOffsetIfReturnStatement = m_sourceOffset;
+
+ if (shouldAnalyze && isNewLocation)
+ vm()->typeProfiler()->insertNewLocation(location);
+
+ instructions[i + 2].u.location = location;
+ break;
+ }
+
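+        // Record whether this block contains a debugger statement; it shows up as an op_debug
+        // with the DidReachBreakpoint hook.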
+ case op_debug: {
+ if (pc[1].u.index == DidReachBreakpoint)
+ m_hasDebuggerStatement = true;
+ break;
+ }
+
+ default:
+ break;
+ }
+ i += opLength;
+ }
+
+ if (vm()->controlFlowProfiler())
+ insertBasicBlockBoundariesForControlFlowProfiler(instructions);
+
+ m_instructions = WTF::RefCountedArray<Instruction>(instructions);
+
+ // Set optimization thresholds only after m_instructions is initialized, since these
+ // rely on the instruction count (and are in theory permitted to also inspect the
+    // instruction stream to more accurately assess the cost of tier-up).
+ optimizeAfterWarmUp();
+ jitAfterWarmUp();
+
+ // If the concurrent thread will want the code block's hash, then compute it here
+ // synchronously.
+ if (Options::alwaysComputeHash())
+ hash();
+
+ if (Options::dumpGeneratedBytecodes())
+ dumpBytecode();
+
+ m_heap->m_codeBlocks.add(this);
+ m_heap->reportExtraMemoryAllocated(sizeof(CodeBlock) + m_instructions.size() * sizeof(Instruction));
+}
+
+CodeBlock::~CodeBlock()
+{
+ if (m_vm->m_perBytecodeProfiler)
+ m_vm->m_perBytecodeProfiler->notifyDestruction(this);
+
+#if ENABLE(VERBOSE_VALUE_PROFILE)
+ dumpValueProfiles();
+#endif
+ while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
+ m_incomingLLIntCalls.begin()->remove();
+#if ENABLE(JIT)
+ // We may be destroyed before any CodeBlocks that refer to us are destroyed.
+ // Consider that two CodeBlocks become unreachable at the same time. There
+ // is no guarantee about the order in which the CodeBlocks are destroyed.
+ // So, if we don't remove incoming calls, and get destroyed before the
+ // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
+ // destructor will try to remove nodes from our (no longer valid) linked list.
+ while (m_incomingCalls.begin() != m_incomingCalls.end())
+ m_incomingCalls.begin()->remove();
+ while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
+ m_incomingPolymorphicCalls.begin()->remove();
+
+ // Note that our outgoing calls will be removed from other CodeBlocks'
+ // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
+ // destructors.
+
+ for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter)
+ (*iter)->deref();
+#endif // ENABLE(JIT)
+}
+
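+// Keep the per-argument value profiles sized to match the declared parameter count.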
+void CodeBlock::setNumParameters(int newValue)
+{
+ m_numParameters = newValue;
+
+ m_argumentValueProfiles.resizeToFit(newValue);
+}
+
+void EvalCodeCache::visitAggregate(SlotVisitor& visitor)
+{
+ EvalCacheMap::iterator end = m_cacheMap.end();
+ for (EvalCacheMap::iterator ptr = m_cacheMap.begin(); ptr != end; ++ptr)
+ visitor.append(&ptr->value);
+}
+
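+// Only a DFG code block can carry an FTL OSR entry block; for everything else there is
+// nothing to return.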
+CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
+{
+#if ENABLE(FTL_JIT)
+ if (jitType() != JITCode::DFGJIT)
+ return 0;
+ DFG::JITCode* jitCode = m_jitCode->dfg();
+ return jitCode->osrEntryBlock.get();
+#else // ENABLE(FTL_JIT)
+ return 0;
+#endif // ENABLE(FTL_JIT)
+}
+
+void CodeBlock::visitAggregate(SlotVisitor& visitor)
+{
+#if ENABLE(PARALLEL_GC)
+ // I may be asked to scan myself more than once, and it may even happen concurrently.
+ // To this end, use an atomic operation to check (and set) if I've been called already.
+ // Only one thread may proceed past this point - whichever one wins the atomic set race.
+ bool setByMe = m_visitAggregateHasBeenCalled.compareExchangeStrong(false, true);
+ if (!setByMe)
+ return;
+#endif // ENABLE(PARALLEL_GC)
+
+ if (!!m_alternative)
+ m_alternative->visitAggregate(visitor);
+
+ if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
+ otherBlock->visitAggregate(visitor);
+
+ visitor.reportExtraMemoryVisited(ownerExecutable(), sizeof(CodeBlock));
+ if (m_jitCode)
+ visitor.reportExtraMemoryVisited(ownerExecutable(), m_jitCode->size());
+ if (m_instructions.size()) {
+ // Divide by refCount() because m_instructions points to something that is shared
+ // by multiple CodeBlocks, and we only want to count it towards the heap size once.
+ // Having each CodeBlock report only its proportional share of the size is one way
+ // of accomplishing this.
+ visitor.reportExtraMemoryVisited(ownerExecutable(), m_instructions.size() * sizeof(Instruction) / m_instructions.refCount());
+ }
+
+ visitor.append(&m_unlinkedCode);
+
+ // There are three things that may use unconditional finalizers: lazy bytecode freeing,
+    // inline cache clearing, and jettisoning. The probability that we want to do at
+    // least one of those things is quite close to 1. So we add a finalizer no matter what,
+    // and when it runs, it figures out whether it has any work to do.
+ visitor.addUnconditionalFinalizer(this);
+
+ m_allTransitionsHaveBeenMarked = false;
+
+ if (shouldImmediatelyAssumeLivenessDuringScan()) {
+ // This code block is live, so scan all references strongly and return.
+ stronglyVisitStrongReferences(visitor);
+ stronglyVisitWeakReferences(visitor);
+ propagateTransitions(visitor);
+ return;
+ }
+
+ // There are two things that we use weak reference harvesters for: DFG fixpoint for
+ // jettisoning, and trying to find structures that would be live based on some
+    // inline cache. So it makes sense to register one regardless.
+ visitor.addWeakReferenceHarvester(this);
+
+#if ENABLE(DFG_JIT)
+ // We get here if we're live in the sense that our owner executable is live,
+ // but we're not yet live for sure in another sense: we may yet decide that this
+ // code block should be jettisoned based on its outgoing weak references being
+ // stale. Set a flag to indicate that we're still assuming that we're dead, and
+ // perform one round of determining if we're live. The GC may determine, based on
+ // either us marking additional objects, or by other objects being marked for
+ // other reasons, that this iteration should run again; it will notify us of this
+ // decision by calling harvestWeakReferences().
+
+ m_jitCode->dfgCommon()->livenessHasBeenProved = false;
+
+ propagateTransitions(visitor);
+ determineLiveness(visitor);
+#else // ENABLE(DFG_JIT)
+ RELEASE_ASSERT_NOT_REACHED();
+#endif // ENABLE(DFG_JIT)
+}
+
+bool CodeBlock::shouldImmediatelyAssumeLivenessDuringScan()
+{
+#if ENABLE(DFG_JIT)
+ // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
+    // their weak references go stale. So if a baseline JIT CodeBlock gets
+    // scanned, we can assume that it's live.
+ if (!JITCode::isOptimizingJIT(jitType()))
+ return true;
+
+ // For simplicity, we don't attempt to jettison code blocks during GC if
+ // they are executing. Instead we strongly mark their weak references to
+ // allow them to continue to execute soundly.
+ if (m_mayBeExecuting)
+ return true;
+
+ if (Options::forceDFGCodeBlockLiveness())
+ return true;
+
+ return false;
+#else
+ return true;
+#endif
+}
+
+bool CodeBlock::isKnownToBeLiveDuringGC()
+{
+#if ENABLE(DFG_JIT)
+ // This should return true for:
+ // - Code blocks that behave like normal objects - i.e. if they are referenced then they
+ // are live.
+ // - Code blocks that were running on the stack.
+ // - Code blocks that survived the last GC if the current GC is an Eden GC. This is
+ // because either livenessHasBeenProved would have survived as true or m_mayBeExecuting
+ // would survive as true.
+ // - Code blocks that don't have any dead weak references.
+
+ return shouldImmediatelyAssumeLivenessDuringScan()
+ || m_jitCode->dfgCommon()->livenessHasBeenProved;
+#else
+ return true;
+#endif
+}
+
+#if ENABLE(DFG_JIT)
+static bool shouldMarkTransition(DFG::WeakReferenceTransition& transition)
+{
+ if (transition.m_codeOrigin && !Heap::isMarked(transition.m_codeOrigin.get()))
+ return false;
+
+ if (!Heap::isMarked(transition.m_from.get()))
+ return false;
+
+ return true;
+}
+#endif // ENABLE(DFG_JIT)
+
+void CodeBlock::propagateTransitions(SlotVisitor& visitor)
+{
+ UNUSED_PARAM(visitor);
+
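+    // This runs as part of the GC's marking fixpoint. Once every transition target has been
+    // visited, we set m_allTransitionsHaveBeenMarked so that later passes can return early.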
+ if (m_allTransitionsHaveBeenMarked)
+ return;
+
+ bool allAreMarkedSoFar = true;
+
+ Interpreter* interpreter = m_vm->interpreter;
+ if (jitType() == JITCode::InterpreterThunk) {
+ const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
+ for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
+ Instruction* instruction = &instructions()[propertyAccessInstructions[i]];
+ switch (interpreter->getOpcodeID(instruction[0].u.opcode)) {
+ case op_put_by_id_transition_direct:
+ case op_put_by_id_transition_normal:
+ case op_put_by_id_transition_direct_out_of_line:
+ case op_put_by_id_transition_normal_out_of_line: {
+ if (Heap::isMarked(instruction[4].u.structure.get()))
+ visitor.append(&instruction[6].u.structure);
+ else
+ allAreMarkedSoFar = false;
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ }
+
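+    // Baseline JIT inline caches record put_by_id transitions in structure stub info. As in the
+    // LLInt case, a transition's new structure is marked only when its old structure (and the
+    // owning code origin, if any) is already marked.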
+#if ENABLE(JIT)
+ if (JITCode::isJIT(jitType())) {
+ for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
+ StructureStubInfo& stubInfo = **iter;
+ switch (stubInfo.accessType) {
+ case access_put_by_id_transition_normal:
+ case access_put_by_id_transition_direct: {
+ JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
+ if ((!origin || Heap::isMarked(origin))
+ && Heap::isMarked(stubInfo.u.putByIdTransition.previousStructure.get()))
+ visitor.append(&stubInfo.u.putByIdTransition.structure);
+ else
+ allAreMarkedSoFar = false;
+ break;
+ }
+
+ case access_put_by_id_list: {
+ PolymorphicPutByIdList* list = stubInfo.u.putByIdList.list;
+ JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
+ if (origin && !Heap::isMarked(origin)) {
+ allAreMarkedSoFar = false;
+ break;
+ }
+ for (unsigned j = list->size(); j--;) {
+ PutByIdAccess& access = list->m_list[j];
+ if (!access.isTransition())
+ continue;
+ if (Heap::isMarked(access.oldStructure()))
+ visitor.append(&access.m_newStructure);
+ else
+ allAreMarkedSoFar = false;
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+ }
+#endif // ENABLE(JIT)
+
+#if ENABLE(DFG_JIT)
+ if (JITCode::isOptimizingJIT(jitType())) {
+ DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
+
+ for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
+ if (shouldMarkTransition(dfgCommon->transitions[i])) {
+ // If the following three things are live, then the target of the
+ // transition is also live:
+ //
+ // - This code block. We know it's live already because otherwise
+ // we wouldn't be scanning ourselves.
+ //
+ // - The code origin of the transition. Transitions may arise from
+                //   code that was inlined. They are not relevant if the object that
+                //   the inlinee needs in order to run is no longer live.
+ //
+ // - The source of the transition. The transition checks if some
+ // heap location holds the source, and if so, stores the target.
+ // Hence the source must be live for the transition to be live.
+ //
+ // We also short-circuit the liveness if the structure is harmless
+ // to mark (i.e. its global object and prototype are both already
+ // live).
+
+ visitor.append(&dfgCommon->transitions[i].m_to);
+ } else
+ allAreMarkedSoFar = false;
+ }
+ }
+#endif // ENABLE(DFG_JIT)
+
+ if (allAreMarkedSoFar)
+ m_allTransitionsHaveBeenMarked = true;
+}
+
+void CodeBlock::determineLiveness(SlotVisitor& visitor)
+{
+ UNUSED_PARAM(visitor);
+
+ if (shouldImmediatelyAssumeLivenessDuringScan())
+ return;
+
+#if ENABLE(DFG_JIT)
+ // Check if we have any remaining work to do.
+ DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
+ if (dfgCommon->livenessHasBeenProved)
+ return;
+
+ // Now check all of our weak references. If all of them are live, then we
+    // have proved liveness and so we scan our strong references. If at the end of
+    // GC we still have not proved liveness, then this code block is toast.
+ bool allAreLiveSoFar = true;
+ for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
+ if (!Heap::isMarked(dfgCommon->weakReferences[i].get())) {
+ allAreLiveSoFar = false;
+ break;
+ }
+ }
+ if (allAreLiveSoFar) {
+ for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
+ if (!Heap::isMarked(dfgCommon->weakStructureReferences[i].get())) {
+ allAreLiveSoFar = false;
+ break;
+ }
+ }
+ }
+
+ // If some weak references are dead, then this fixpoint iteration was
+ // unsuccessful.
+ if (!allAreLiveSoFar)
+ return;
+
+ // All weak references are live. Record this information so we don't
+ // come back here again, and scan the strong references.
+ dfgCommon->livenessHasBeenProved = true;
+ stronglyVisitStrongReferences(visitor);
+#endif // ENABLE(DFG_JIT)
+}
+
+void CodeBlock::visitWeakReferences(SlotVisitor& visitor)
+{
+ propagateTransitions(visitor);
+ determineLiveness(visitor);
+}
+
+void CodeBlock::finalizeUnconditionally()
+{
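+    // Sweep caches that hold weakly-treated pointers: clear LLInt caches whose cells died in this
+    // GC, jettison DFG code with dead weak references, and reset JIT inline caches that refer to
+    // dead cells.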
+ Interpreter* interpreter = m_vm->interpreter;
+ if (JITCode::couldBeInterpreted(jitType())) {
+ const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
+ for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
+ Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];
+ switch (interpreter->getOpcodeID(curInstruction[0].u.opcode)) {
+ case op_get_by_id:
+ case op_get_by_id_out_of_line:
+ case op_put_by_id:
+ case op_put_by_id_out_of_line:
+ if (!curInstruction[4].u.structure || Heap::isMarked(curInstruction[4].u.structure.get()))
+ break;
+ if (Options::verboseOSR())
+ dataLogF("Clearing LLInt property access with structure %p.\n", curInstruction[4].u.structure.get());
+ curInstruction[4].u.structure.clear();
+ curInstruction[5].u.operand = 0;
+ break;
+ case op_put_by_id_transition_direct:
+ case op_put_by_id_transition_normal:
+ case op_put_by_id_transition_direct_out_of_line:
+ case op_put_by_id_transition_normal_out_of_line:
+ if (Heap::isMarked(curInstruction[4].u.structure.get())
+ && Heap::isMarked(curInstruction[6].u.structure.get())
+ && Heap::isMarked(curInstruction[7].u.structureChain.get()))
+ break;
+ if (Options::verboseOSR()) {
+ dataLogF("Clearing LLInt put transition with structures %p -> %p, chain %p.\n",
+ curInstruction[4].u.structure.get(),
+ curInstruction[6].u.structure.get(),
+ curInstruction[7].u.structureChain.get());
+ }
+ curInstruction[4].u.structure.clear();
+ curInstruction[6].u.structure.clear();
+ curInstruction[7].u.structureChain.clear();
+ curInstruction[0].u.opcode = interpreter->getOpcode(op_put_by_id);
+ break;
+ case op_get_array_length:
+ break;
+ case op_to_this:
+ if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get()))
+ break;
+ if (Options::verboseOSR())
+ dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get());
+ curInstruction[2].u.structure.clear();
+ curInstruction[3].u.toThisStatus = merge(
+ curInstruction[3].u.toThisStatus, ToThisClearedByGC);
+ break;
+ case op_create_this: {
+ auto& cacheWriteBarrier = curInstruction[4].u.jsCell;
+ if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
+ break;
+ JSCell* cachedFunction = cacheWriteBarrier.get();
+ if (Heap::isMarked(cachedFunction))
+ break;
+ if (Options::verboseOSR())
+ dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction);
+ cacheWriteBarrier.clear();
+ break;
+ }
+ case op_resolve_scope: {
+ // Right now this isn't strictly necessary. Any symbol tables that this will refer to
+ // are for outer functions, and we refer to those functions strongly, and they refer
+ // to the symbol table strongly. But it's nice to be on the safe side.
+ WriteBarrierBase<SymbolTable>& symbolTable = curInstruction[6].u.symbolTable;
+ if (!symbolTable || Heap::isMarked(symbolTable.get()))
+ break;
+ if (Options::verboseOSR())
+ dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
+ symbolTable.clear();
+ break;
+ }
+ case op_get_from_scope:
+ case op_put_to_scope: {
+ ResolveModeAndType modeAndType =
+ ResolveModeAndType(curInstruction[4].u.operand);
+ if (modeAndType.type() == GlobalVar || modeAndType.type() == GlobalVarWithVarInjectionChecks || modeAndType.type() == LocalClosureVar)
+ continue;
+ WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure;
+ if (!structure || Heap::isMarked(structure.get()))
+ break;
+ if (Options::verboseOSR())
+ dataLogF("Clearing scope access with structure %p.\n", structure.get());
+ structure.clear();
+ break;
+ }
+ default:
+ OpcodeID opcodeID = interpreter->getOpcodeID(curInstruction[0].u.opcode);
+ ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]);
+ }
+ }
+
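+        // Unlink LLInt call caches whose callees are no longer marked, and drop stale
+        // last-seen callees.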
+ for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
+ if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
+ if (Options::verboseOSR())
+ dataLog("Clearing LLInt call from ", *this, "\n");
+ m_llintCallLinkInfos[i].unlink();
+ }
+ if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
+ m_llintCallLinkInfos[i].lastSeenCallee.clear();
+ }
+ }
+
+#if ENABLE(DFG_JIT)
+    // Check whether we are known to be live. If we are not, then jettison.
+ if (!isKnownToBeLiveDuringGC()) {
+ if (Options::verboseOSR())
+ dataLog(*this, " has dead weak references, jettisoning during GC.\n");
+
+ if (DFG::shouldShowDisassembly()) {
+ dataLog(*this, " will be jettisoned because of the following dead references:\n");
+ DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
+ for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
+ DFG::WeakReferenceTransition& transition = dfgCommon->transitions[i];
+ JSCell* origin = transition.m_codeOrigin.get();
+ JSCell* from = transition.m_from.get();
+ JSCell* to = transition.m_to.get();
+ if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
+ continue;
+ dataLog(" Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
+ }
+ for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
+ JSCell* weak = dfgCommon->weakReferences[i].get();
+ if (Heap::isMarked(weak))
+ continue;
+ dataLog(" Weak reference ", RawPointer(weak), ".\n");
+ }
+ }
+
+ jettison(Profiler::JettisonDueToWeakReference);
+ return;
+ }
+#endif // ENABLE(DFG_JIT)
+
+#if ENABLE(JIT)
+ // Handle inline caches.
+ if (!!jitCode()) {
+ RepatchBuffer repatchBuffer(this);
+
+ for (auto iter = callLinkInfosBegin(); !!iter; ++iter)
+ (*iter)->visitWeak(repatchBuffer);
+
+ for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
+ StructureStubInfo& stubInfo = **iter;
+
+ if (stubInfo.visitWeakReferences(repatchBuffer))
+ continue;
+
+ resetStubDuringGCInternal(repatchBuffer, stubInfo);
+ }
+ }
+#endif
+}
+
+void CodeBlock::getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result)
+{
+#if ENABLE(JIT)
+ toHashMap(m_stubInfos, getStructureStubInfoCodeOrigin, result);
+#else
+ UNUSED_PARAM(result);
+#endif
+}
+
+void CodeBlock::getStubInfoMap(StubInfoMap& result)
+{
+ ConcurrentJITLocker locker(m_lock);
+ getStubInfoMap(locker, result);
+}
+
+void CodeBlock::getCallLinkInfoMap(const ConcurrentJITLocker&, CallLinkInfoMap& result)
+{
+#if ENABLE(JIT)
+ toHashMap(m_callLinkInfos, getCallLinkInfoCodeOrigin, result);
+#else
+ UNUSED_PARAM(result);
+#endif
+}
+
+void CodeBlock::getCallLinkInfoMap(CallLinkInfoMap& result)
+{
+ ConcurrentJITLocker locker(m_lock);
+ getCallLinkInfoMap(locker, result);
+}
+
+#if ENABLE(JIT)
+StructureStubInfo* CodeBlock::addStubInfo()
+{
+ ConcurrentJITLocker locker(m_lock);
+ return m_stubInfos.add();
+}
+
+StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
+{
+ for (StructureStubInfo* stubInfo : m_stubInfos) {
+ if (stubInfo->codeOrigin == codeOrigin)
+ return stubInfo;
+ }
+ return nullptr;
+}
+
+CallLinkInfo* CodeBlock::addCallLinkInfo()
+{
+ ConcurrentJITLocker locker(m_lock);
+ return m_callLinkInfos.add();
+}
+
+void CodeBlock::resetStub(StructureStubInfo& stubInfo)
+{
+ if (stubInfo.accessType == access_unset)
+ return;
+
+ ConcurrentJITLocker locker(m_lock);
+
+ RepatchBuffer repatchBuffer(this);
+ resetStubInternal(repatchBuffer, stubInfo);
+}
+
+void CodeBlock::resetStubInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
+{
+ AccessType accessType = static_cast<AccessType>(stubInfo.accessType);
+
+ if (Options::verboseOSR()) {
+ // This can be called from GC destructor calls, so we don't try to do a full dump
+ // of the CodeBlock.
+ dataLog("Clearing structure cache (kind ", static_cast<int>(stubInfo.accessType), ") in ", RawPointer(this), ".\n");
+ }
+
+ RELEASE_ASSERT(JITCode::isJIT(jitType()));
+
+ if (isGetByIdAccess(accessType))
+ resetGetByID(repatchBuffer, stubInfo);
+ else if (isPutByIdAccess(accessType))
+ resetPutByID(repatchBuffer, stubInfo);
+ else {
+ RELEASE_ASSERT(isInAccess(accessType));
+ resetIn(repatchBuffer, stubInfo);
+ }
+
+ stubInfo.reset();
+}
+
+void CodeBlock::resetStubDuringGCInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
+{
+ resetStubInternal(repatchBuffer, stubInfo);
+ stubInfo.resetByGC = true;
+}
+
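+// Linear search of the call link info bag for the entry whose code origin is the given
+// bytecode index.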
+CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
+{
+ for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
+ if ((*iter)->codeOrigin() == CodeOrigin(index))
+ return *iter;
+ }
+ return nullptr;
+}
+#endif
+
+void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor)
+{
+ visitor.append(&m_globalObject);
+ visitor.append(&m_ownerExecutable);
+ visitor.append(&m_symbolTable);
+ visitor.append(&m_unlinkedCode);
+ if (m_rareData)
+ m_rareData->m_evalCodeCache.visitAggregate(visitor);
+ visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
+ for (size_t i = 0; i < m_functionExprs.size(); ++i)
+ visitor.append(&m_functionExprs[i]);
+ for (size_t i = 0; i < m_functionDecls.size(); ++i)
+ visitor.append(&m_functionDecls[i]);
+ for (unsigned i = 0; i < m_objectAllocationProfiles.size(); ++i)
+ m_objectAllocationProfiles[i].visitAggregate(visitor);
+
+#if ENABLE(DFG_JIT)
+ if (JITCode::isOptimizingJIT(jitType())) {
+ // FIXME: This is an antipattern for two reasons. References introduced by the DFG
+ // that aren't in the original CodeBlock being compiled should be weakly referenced.
+ // Inline call frames aren't in the original CodeBlock, so they qualify as weak. Also,
+ // those weak references should already be tracked in the DFG as weak FrozenValues. So,
+ // there is probably no need for this. We already have assertions that this should be
+ // unnecessary.
+ // https://bugs.webkit.org/show_bug.cgi?id=146613
+ DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
+ if (dfgCommon->inlineCallFrames.get())
+ dfgCommon->inlineCallFrames->visitAggregate(visitor);
+ }
+#endif
+
+ updateAllPredictions();
+}
+
+void CodeBlock::stronglyVisitWeakReferences(SlotVisitor& visitor)
+{
+ UNUSED_PARAM(visitor);
+
+#if ENABLE(DFG_JIT)
+ if (!JITCode::isOptimizingJIT(jitType()))
+ return;
+
+ DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
+
+ for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
+ if (!!dfgCommon->transitions[i].m_codeOrigin)
+ visitor.append(&dfgCommon->transitions[i].m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
+ visitor.append(&dfgCommon->transitions[i].m_from);
+ visitor.append(&dfgCommon->transitions[i].m_to);
+ }
+
+ for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i)
+ visitor.append(&dfgCommon->weakReferences[i]);
+
+ for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i)
+ visitor.append(&dfgCommon->weakStructureReferences[i]);
+#endif
+}
+
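+// Walk down the alternative() chain; the bottom of the chain is the baseline (or
+// not-yet-compiled) code block.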
+CodeBlock* CodeBlock::baselineAlternative()
+{
+#if ENABLE(JIT)
+ CodeBlock* result = this;
+ while (result->alternative())
+ result = result->alternative();
+ RELEASE_ASSERT(result);
+ RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITCode::None);
+ return result;
+#else
+ return this;
+#endif
+}
+
+CodeBlock* CodeBlock::baselineVersion()
+{
+#if ENABLE(JIT)
+ if (JITCode::isBaselineCode(jitType()))
+ return this;
+ CodeBlock* result = replacement();
+ if (!result) {
+ // This can happen if we're creating the original CodeBlock for an executable.
+ // Assume that we're the baseline CodeBlock.
+ RELEASE_ASSERT(jitType() == JITCode::None);
+ return this;
+ }
+ result = result->baselineAlternative();
+ return result;
+#else
+ return this;
+#endif
+}
+
+#if ENABLE(JIT)
+bool CodeBlock::hasOptimizedReplacement(JITCode::JITType typeToReplace)
+{
+ return JITCode::isHigherTier(replacement()->jitType(), typeToReplace);
+}
+
+bool CodeBlock::hasOptimizedReplacement()
+{
+ return hasOptimizedReplacement(jitType());
+}
+#endif
+
+HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
+{
+ RELEASE_ASSERT(bytecodeOffset < instructions().size());
+
+ if (!m_rareData)
+ return 0;
+
+ Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
+ for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
+ HandlerInfo& handler = exceptionHandlers[i];
+ if ((requiredHandler == RequiredHandler::CatchHandler) && !handler.isCatchHandler())
+ continue;
+
+ // Handlers are ordered innermost first, so the first handler we encounter
+ // that contains the source address is the correct handler to use.
+ if (handler.start <= bytecodeOffset && handler.end > bytecodeOffset)
+ return &handler;
+ }
+
+ return 0;
+}
+
+unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
+{
+ RELEASE_ASSERT(bytecodeOffset < instructions().size());
+ return m_ownerExecutable->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
+}
+
+unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
+{
+ int divot;
+ int startOffset;
+ int endOffset;
+ unsigned line;
+ unsigned column;
+ expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
+ return column;
+}
+
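+// The unlinked code stores expression info relative to its own source; translate it into this
+// code block's absolute source offset, line, and column.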
+void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column)
+{
+ m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
+ divot += m_sourceOffset;
+ column += line ? 1 : firstLineColumnOffset();
+ line += m_ownerExecutable->firstLine();
+}
+
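+// Scan the instruction stream for an op_debug whose expression range matches the given line,
+// and the given column unless the column is unspecified.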
+bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
+{
+ Interpreter* interpreter = vm()->interpreter;
+ const Instruction* begin = instructions().begin();
+ const Instruction* end = instructions().end();
+ for (const Instruction* it = begin; it != end;) {
+ OpcodeID opcodeID = interpreter->getOpcodeID(it->u.opcode);
+ if (opcodeID == op_debug) {
+ unsigned bytecodeOffset = it - begin;
+ int unused;
+ unsigned opDebugLine;
+ unsigned opDebugColumn;
+ expressionRangeForBytecodeOffset(bytecodeOffset, unused, unused, unused, opDebugLine, opDebugColumn);
+ if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn))
+ return true;
+ }
+ it += opcodeLengths[opcodeID];
+ }
+ return false;
+}
+
+void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
+{
+ m_rareCaseProfiles.shrinkToFit();
+ m_specialFastCaseProfiles.shrinkToFit();
+
+ if (shrinkMode == EarlyShrink) {
+ m_constantRegisters.shrinkToFit();
+ m_constantsSourceCodeRepresentation.shrinkToFit();
+
+ if (m_rareData) {
+ m_rareData->m_switchJumpTables.shrinkToFit();
+ m_rareData->m_stringSwitchJumpTables.shrinkToFit();
+ }
+    } // else don't shrink these, because we would have already handed out pointers into these tables.
+}
+
+#if ENABLE(JIT)
+void CodeBlock::unlinkCalls()
+{
+ if (!!m_alternative)
+ m_alternative->unlinkCalls();
+ for (size_t i = 0; i < m_llintCallLinkInfos.size(); ++i) {
+ if (m_llintCallLinkInfos[i].isLinked())
+ m_llintCallLinkInfos[i].unlink();
+ }
+ if (m_callLinkInfos.isEmpty())
+ return;
+ if (!m_vm->canUseJIT())
+ return;
+ RepatchBuffer repatchBuffer(this);
+ for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
+ CallLinkInfo& info = **iter;
+ if (!info.isLinked())
+ continue;
+ info.unlink(repatchBuffer);
+ }
+}
+
+void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
+{
+ noticeIncomingCall(callerFrame);
+ m_incomingCalls.push(incoming);
+}
+
+void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
+{
+ noticeIncomingCall(callerFrame);
+ m_incomingPolymorphicCalls.push(incoming);
+}
+#endif // ENABLE(JIT)
+
+void CodeBlock::unlinkIncomingCalls()
+{
+ while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
+ m_incomingLLIntCalls.begin()->unlink();
+#if ENABLE(JIT)
+ if (m_incomingCalls.isEmpty() && m_incomingPolymorphicCalls.isEmpty())
+ return;
+ RepatchBuffer repatchBuffer(this);
+ while (m_incomingCalls.begin() != m_incomingCalls.end())
+ m_incomingCalls.begin()->unlink(repatchBuffer);
+ while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
+ m_incomingPolymorphicCalls.begin()->unlink(repatchBuffer);
+#endif // ENABLE(JIT)
+}
+
+void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
+{
+ noticeIncomingCall(callerFrame);
+ m_incomingLLIntCalls.push(incoming);
+}
+
+void CodeBlock::clearEvalCache()
+{
+ if (!!m_alternative)
+ m_alternative->clearEvalCache();
+ if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
+ otherBlock->clearEvalCache();
+ if (!m_rareData)
+ return;
+ m_rareData->m_evalCodeCache.clear();
+}
+
+void CodeBlock::install()
+{
+ ownerExecutable()->installCode(this);
+}
+
+PassRefPtr<CodeBlock> CodeBlock::newReplacement()
+{
+ return ownerExecutable()->newReplacementCodeBlockFor(specializationKind());
+}
+
+#if ENABLE(JIT)
+CodeBlock* ProgramCodeBlock::replacement()
+{
+ return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
+}
+
+CodeBlock* EvalCodeBlock::replacement()
+{
+ return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
+}
+
+CodeBlock* FunctionCodeBlock::replacement()
+{
+ return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
+}
+
+DFG::CapabilityLevel ProgramCodeBlock::capabilityLevelInternal()
+{
+ return DFG::programCapabilityLevel(this);
+}
+
+DFG::CapabilityLevel EvalCodeBlock::capabilityLevelInternal()
+{
+ return DFG::evalCapabilityLevel(this);
+}
+
+DFG::CapabilityLevel FunctionCodeBlock::capabilityLevelInternal()
+{
+ if (m_isConstructor)
+ return DFG::functionForConstructCapabilityLevel(this);
+ return DFG::functionForCallCapabilityLevel(this);
+}
+#endif
+
+void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail)
+{
+ RELEASE_ASSERT(reason != Profiler::NotJettisoned);
+
+#if ENABLE(DFG_JIT)
+ if (DFG::shouldShowDisassembly()) {
+ dataLog("Jettisoning ", *this);
+ if (mode == CountReoptimization)
+ dataLog(" and counting reoptimization");
+ dataLog(" due to ", reason);
+ if (detail)
+ dataLog(", ", *detail);
+ dataLog(".\n");
+ }
+
+ DeferGCForAWhile deferGC(*m_heap);
+ RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
+
+ if (Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get())
+ compilation->setJettisonReason(reason, detail);
+
+ // We want to accomplish two things here:
+ // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
+ // we should OSR exit at the top of the next bytecode instruction after the return.
+ // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
+
+ // This accomplishes the OSR-exit-on-return part, and does its own book-keeping about
+ // whether the invalidation has already happened.
+ if (!jitCode()->dfgCommon()->invalidate()) {
+ // Nothing to do since we've already been invalidated. That means that we cannot be
+ // the optimized replacement.
+ RELEASE_ASSERT(this != replacement());
+ return;
+ }
+
+ if (DFG::shouldShowDisassembly())
+ dataLog(" Did invalidate ", *this, "\n");
+
+ // Count the reoptimization if that's what the user wanted.
+ if (mode == CountReoptimization) {
+ // FIXME: Maybe this should call alternative().
+ // https://bugs.webkit.org/show_bug.cgi?id=123677
+ baselineAlternative()->countReoptimization();
+ if (DFG::shouldShowDisassembly())
+ dataLog(" Did count reoptimization for ", *this, "\n");
+ }
+
+ // Now take care of the entrypoint.
+ if (this != replacement()) {
+ // This means that we were never the entrypoint. This can happen for OSR entry code
+ // blocks.
+ return;
+ }
+ alternative()->optimizeAfterWarmUp();
+ tallyFrequentExitSites();
+ alternative()->install();
+ if (DFG::shouldShowDisassembly())
+ dataLog(" Did install baseline version of ", *this, "\n");
+#else // ENABLE(DFG_JIT)
+ UNUSED_PARAM(mode);
+ UNUSED_PARAM(detail);
+ UNREACHABLE_FOR_PLATFORM();
+#endif // ENABLE(DFG_JIT)
+}
+
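+// For an inlined code origin, use the global object of the inlined function's executable
+// rather than this code block's.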
+JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
+{
+ if (!codeOrigin.inlineCallFrame)
+ return globalObject();
+ return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->eitherCodeBlock()->globalObject();
+}
+
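+// Stack-walking functor: starting at a given call frame, reports whether the given CodeBlock
+// appears again within a bounded number of frames.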
+class RecursionCheckFunctor {
+public:
+ RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck)
+ : m_startCallFrame(startCallFrame)
+ , m_codeBlock(codeBlock)
+ , m_depthToCheck(depthToCheck)
+ , m_foundStartCallFrame(false)
+ , m_didRecurse(false)
+ { }
+
+ StackVisitor::Status operator()(StackVisitor& visitor)
+ {
+ CallFrame* currentCallFrame = visitor->callFrame();
+
+ if (currentCallFrame == m_startCallFrame)
+ m_foundStartCallFrame = true;
+
+ if (m_foundStartCallFrame) {
+ if (visitor->callFrame()->codeBlock() == m_codeBlock) {
+ m_didRecurse = true;
+ return StackVisitor::Done;
+ }
+
+ if (!m_depthToCheck--)
+ return StackVisitor::Done;
+ }
+
+ return StackVisitor::Continue;
+ }
+
+ bool didRecurse() const { return m_didRecurse; }
+
+private:
+ CallFrame* m_startCallFrame;
+ CodeBlock* m_codeBlock;
+ unsigned m_depthToCheck;
+ bool m_foundStartCallFrame;
+ bool m_didRecurse;
+};
+
+void CodeBlock::noticeIncomingCall(ExecState* callerFrame)