+{
+#if ENABLE(PARALLEL_GC) && ENABLE(DFG_JIT)
+ if (!!m_dfgData) {
+        // I may be asked to scan myself more than once, and it may even happen
+        // concurrently. Guard against that with a CAS loop that checks whether
+        // another thread has been here already. Only one thread may proceed past
+        // this point - whichever one wins the CAS race.
+ unsigned oldValue;
+ do {
+ oldValue = m_dfgData->visitAggregateHasBeenCalled;
+ if (oldValue) {
+ // Looks like someone else won! Return immediately to ensure that we don't
+ // trace the same CodeBlock concurrently. Doing so is hazardous since we will
+ // be mutating the state of ValueProfiles, which contain JSValues, which can
+ // have word-tearing on 32-bit, leading to awesome timing-dependent crashes
+ // that are nearly impossible to track down.
+
+ // Also note that it must be safe to return early as soon as we see the
+ // value true (well, (unsigned)1), since once a GC thread is in this method
+ // and has won the CAS race (i.e. was responsible for setting the value true)
+ // it will definitely complete the rest of this method before declaring
+ // termination.
+ return;
+ }
+ } while (!WTF::weakCompareAndSwap(&m_dfgData->visitAggregateHasBeenCalled, 0, 1));
+ }
+#endif // ENABLE(PARALLEL_GC) && ENABLE(DFG_JIT)
+
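+    // Our alternative (the code block we would fall back to) is reachable through
+    // us, so give it a chance to scan its own references as well.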
+ if (!!m_alternative)
+ m_alternative->visitAggregate(visitor);
+
+    // There are three things that may use unconditional finalizers: lazy bytecode
+    // freeing, inline cache clearing, and jettisoning. The probability that we want
+    // to do at least one of those things is quite close to 1, so we add a finalizer
+    // no matter what; when it runs, it figures out whether it has any work to do.
+ visitor.addUnconditionalFinalizer(this);
+
+ if (shouldImmediatelyAssumeLivenessDuringScan()) {
+ // This code block is live, so scan all references strongly and return.
+ stronglyVisitStrongReferences(visitor);
+ stronglyVisitWeakReferences(visitor);
+ return;
+ }
+
+#if ENABLE(DFG_JIT)
+    // We get here if we're live in the sense that our owner executable is live,
+    // but we're not yet known to be live in another sense: we may yet decide that
+    // this code block should be jettisoned because its outgoing weak references are
+    // stale. Set a flag to indicate that we're still assuming that we're dead, and
+ // perform one round of determining if we're live. The GC may determine, based on
+ // either us marking additional objects, or by other objects being marked for
+ // other reasons, that this iteration should run again; it will notify us of this
+ // decision by calling harvestWeakReferences().
+
+ m_dfgData->livenessHasBeenProved = false;
+ m_dfgData->allTransitionsHaveBeenMarked = false;
+
+ performTracingFixpointIteration(visitor);
+
+    // The GC doesn't have enough information yet for us to decide whether to keep
+    // our DFG data, so we need to register a handler to run again at the end of GC,
+    // when more information is available.
+ if (!(m_dfgData->livenessHasBeenProved && m_dfgData->allTransitionsHaveBeenMarked))
+ visitor.addWeakReferenceHarvester(this);
+
+#else // ENABLE(DFG_JIT)
+ ASSERT_NOT_REACHED();
+#endif // ENABLE(DFG_JIT)
+}
+
+void CodeBlock::performTracingFixpointIteration(SlotVisitor& visitor)
+{
+ UNUSED_PARAM(visitor);
+
+#if ENABLE(DFG_JIT)
+ // Evaluate our weak reference transitions, if there are still some to evaluate.
+ if (!m_dfgData->allTransitionsHaveBeenMarked) {
+ bool allAreMarkedSoFar = true;
+ for (unsigned i = 0; i < m_dfgData->transitions.size(); ++i) {
+ if ((!m_dfgData->transitions[i].m_codeOrigin
+ || Heap::isMarked(m_dfgData->transitions[i].m_codeOrigin.get()))
+ && Heap::isMarked(m_dfgData->transitions[i].m_from.get())) {
+ // If the following three things are live, then the target of the
+ // transition is also live:
+ // - This code block. We know it's live already because otherwise
+ // we wouldn't be scanning ourselves.
+                // - The code origin of the transition. Transitions may arise from
+                //   inlined code; they are irrelevant if the object that the
+                //   inlinee needs in order to run is no longer live.
+ // - The source of the transition. The transition checks if some
+ // heap location holds the source, and if so, stores the target.
+ // Hence the source must be live for the transition to be live.
+ visitor.append(&m_dfgData->transitions[i].m_to);
+ } else
+ allAreMarkedSoFar = false;
+ }
+
+ if (allAreMarkedSoFar)
+ m_dfgData->allTransitionsHaveBeenMarked = true;
+ }
+
+ // Check if we have any remaining work to do.
+ if (m_dfgData->livenessHasBeenProved)
+ return;
+
+    // Now check all of our weak references. If all of them are live, then we
+    // have proved liveness, and so we scan our strong references. If, at the end
+    // of GC, we still have not proved liveness, then this code block is toast.
+ bool allAreLiveSoFar = true;
+ for (unsigned i = 0; i < m_dfgData->weakReferences.size(); ++i) {
+ if (!Heap::isMarked(m_dfgData->weakReferences[i].get())) {
+ allAreLiveSoFar = false;
+ break;
+ }
+ }
+
+ // If some weak references are dead, then this fixpoint iteration was
+ // unsuccessful.
+ if (!allAreLiveSoFar)
+ return;
+
+ // All weak references are live. Record this information so we don't
+ // come back here again, and scan the strong references.
+ m_dfgData->livenessHasBeenProved = true;
+ stronglyVisitStrongReferences(visitor);
+#endif // ENABLE(DFG_JIT)
+}
+
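+// This is the WeakReferenceHarvester callback mentioned in visitAggregate(): the
+// GC calls it while harvesting weak references, after more objects may have been
+// marked, giving us another chance to run the fixpoint iteration to completion.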
+void CodeBlock::visitWeakReferences(SlotVisitor& visitor)
+{
+ performTracingFixpointIteration(visitor);
+}
+
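+// This is the UnconditionalFinalizer callback registered in visitAggregate(). It
+// runs at the end of GC and figures out whether it has any work to do: clearing
+// stale caches, unlinking dead calls, and possibly jettisoning DFG code.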
+void CodeBlock::finalizeUnconditionally()
+{
+#if ENABLE(JIT)
+#if ENABLE(JIT_VERBOSE_OSR)
+ static const bool verboseUnlinking = true;
+#else
+ static const bool verboseUnlinking = false;
+#endif
+#endif // ENABLE(JIT)
+
+#if ENABLE(LLINT)
+ Interpreter* interpreter = m_globalData->interpreter;
+    // interpreter->classicEnabled() returns true if the old C++ interpreter is
+    // enabled; if it is, then we're not using LLInt and have nothing to clear.
+ if (!interpreter->classicEnabled() && !!numberOfInstructions()) {
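+        // Clear LLInt property access caches whose Structures are no longer marked.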
+ for (size_t size = m_propertyAccessInstructions.size(), i = 0; i < size; ++i) {
+ Instruction* curInstruction = &instructions()[m_propertyAccessInstructions[i]];
+ switch (interpreter->getOpcodeID(curInstruction[0].u.opcode)) {
+ case op_get_by_id:
+ case op_put_by_id:
+ if (!curInstruction[4].u.structure || Heap::isMarked(curInstruction[4].u.structure.get()))
+ break;
+ if (verboseUnlinking)
+ dataLog("Clearing LLInt property access with structure %p.\n", curInstruction[4].u.structure.get());
+ curInstruction[4].u.structure.clear();
+ curInstruction[5].u.operand = 0;
+ break;
+ case op_put_by_id_transition_direct:
+ case op_put_by_id_transition_normal:
+ if (Heap::isMarked(curInstruction[4].u.structure.get())
+ && Heap::isMarked(curInstruction[6].u.structure.get())
+ && Heap::isMarked(curInstruction[7].u.structureChain.get()))
+ break;
+ if (verboseUnlinking) {
+ dataLog("Clearing LLInt put transition with structures %p -> %p, chain %p.\n",
+ curInstruction[4].u.structure.get(),
+ curInstruction[6].u.structure.get(),
+ curInstruction[7].u.structureChain.get());
+ }
+ curInstruction[4].u.structure.clear();
+ curInstruction[6].u.structure.clear();
+ curInstruction[7].u.structureChain.clear();
+ curInstruction[0].u.opcode = interpreter->getOpcode(op_put_by_id);
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+ }
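+        // Clear LLInt global resolve caches in the same way.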
+ for (size_t size = m_globalResolveInstructions.size(), i = 0; i < size; ++i) {
+ Instruction* curInstruction = &instructions()[m_globalResolveInstructions[i]];
+ ASSERT(interpreter->getOpcodeID(curInstruction[0].u.opcode) == op_resolve_global
+ || interpreter->getOpcodeID(curInstruction[0].u.opcode) == op_resolve_global_dynamic);
+ if (!curInstruction[3].u.structure || Heap::isMarked(curInstruction[3].u.structure.get()))
+ continue;
+ if (verboseUnlinking)
+ dataLog("Clearing LLInt global resolve cache with structure %p.\n", curInstruction[3].u.structure.get());
+ curInstruction[3].u.structure.clear();
+ curInstruction[4].u.operand = 0;
+ }
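+        // Unlink LLInt call caches whose callees are dead, and forget dead
+        // last-seen callees.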
+ for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
+ if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
+ if (verboseUnlinking)
+ dataLog("Clearing LLInt call from %p.\n", this);
+ m_llintCallLinkInfos[i].unlink();
+ }
+ if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
+ m_llintCallLinkInfos[i].lastSeenCallee.clear();
+ }
+ }
+#endif // ENABLE(LLINT)
+
+#if ENABLE(DFG_JIT)
+    // Check whether we've been proved live. If not, then our outgoing weak
+    // references are stale and we should jettison.
+ if (!(shouldImmediatelyAssumeLivenessDuringScan() || m_dfgData->livenessHasBeenProved)) {
+ if (verboseUnlinking)
+ dataLog("Code block %p has dead weak references, jettisoning during GC.\n", this);
+
+        // Make sure that the baseline JIT knows that it should warm up again
+        // before trying to optimize.
+ alternative()->optimizeAfterWarmUp();
+
+ jettison();
+ return;
+ }
+#endif // ENABLE(DFG_JIT)
+
+#if ENABLE(JIT)
+ // Handle inline caches.
+ if (!!getJITCode()) {
+ RepatchBuffer repatchBuffer(this);
+ for (unsigned i = 0; i < numberOfCallLinkInfos(); ++i) {
+ if (callLinkInfo(i).isLinked() && !Heap::isMarked(callLinkInfo(i).callee.get())) {
+ if (verboseUnlinking)
+ dataLog("Clearing call from %p to %p.\n", this, callLinkInfo(i).callee.get());
+ callLinkInfo(i).unlink(*m_globalData, repatchBuffer);
+ }
+ if (!!callLinkInfo(i).lastSeenCallee
+ && !Heap::isMarked(callLinkInfo(i).lastSeenCallee.get()))
+ callLinkInfo(i).lastSeenCallee.clear();
+ }
+ for (size_t size = m_globalResolveInfos.size(), i = 0; i < size; ++i) {
+ if (m_globalResolveInfos[i].structure && !Heap::isMarked(m_globalResolveInfos[i].structure.get())) {
+ if (verboseUnlinking)
+ dataLog("Clearing resolve info in %p.\n", this);
+ m_globalResolveInfos[i].structure.clear();
+ }
+ }
+
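+        // Visit each structure stub. If stubInfo.visitWeakReferences() reports that
+        // the stub is still fully live, keep it; otherwise reset it through the
+        // reset path of whichever JIT (baseline or DFG) generated it.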
+ for (size_t size = m_structureStubInfos.size(), i = 0; i < size; ++i) {
+ StructureStubInfo& stubInfo = m_structureStubInfos[i];
+
+ AccessType accessType = static_cast<AccessType>(stubInfo.accessType);
+
+ if (stubInfo.visitWeakReferences())
+ continue;
+
+ if (verboseUnlinking)
+ dataLog("Clearing structure cache (kind %d) in %p.\n", stubInfo.accessType, this);
+
+ if (isGetByIdAccess(accessType)) {
+ if (getJITCode().jitType() == JITCode::DFGJIT)
+ DFG::dfgResetGetByID(repatchBuffer, stubInfo);
+ else
+ JIT::resetPatchGetById(repatchBuffer, &stubInfo);
+ } else {
+ ASSERT(isPutByIdAccess(accessType));
+ if (getJITCode().jitType() == JITCode::DFGJIT)
+ DFG::dfgResetPutByID(repatchBuffer, stubInfo);
+ else
+ JIT::resetPatchPutById(repatchBuffer, &stubInfo);
+ }
+
+ stubInfo.reset();
+ }
+
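+        // A method call cache is alive only if its structure, prototype structure,
+        // function, and prototype are all still marked; otherwise reset it, along
+        // with any get_by_id stub at the same bytecode index.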
+ for (size_t size = m_methodCallLinkInfos.size(), i = 0; i < size; ++i) {
+ if (!m_methodCallLinkInfos[i].cachedStructure)
+ continue;
+
+ ASSERT(m_methodCallLinkInfos[i].seenOnce());
+ ASSERT(!!m_methodCallLinkInfos[i].cachedPrototypeStructure);
+
+ if (!Heap::isMarked(m_methodCallLinkInfos[i].cachedStructure.get())
+ || !Heap::isMarked(m_methodCallLinkInfos[i].cachedPrototypeStructure.get())
+ || !Heap::isMarked(m_methodCallLinkInfos[i].cachedFunction.get())
+ || !Heap::isMarked(m_methodCallLinkInfos[i].cachedPrototype.get())) {
+ if (verboseUnlinking)
+ dataLog("Clearing method call in %p.\n", this);
+ m_methodCallLinkInfos[i].reset(repatchBuffer, getJITType());
+
+ StructureStubInfo& stubInfo = getStubInfo(m_methodCallLinkInfos[i].bytecodeIndex);
+
+ AccessType accessType = static_cast<AccessType>(stubInfo.accessType);
+
+ if (accessType != access_unset) {
+ ASSERT(isGetByIdAccess(accessType));
+ if (getJITCode().jitType() == JITCode::DFGJIT)
+ DFG::dfgResetGetByID(repatchBuffer, stubInfo);
+ else
+ JIT::resetPatchGetById(repatchBuffer, &stubInfo);
+ stubInfo.reset();
+ }
+ }
+ }
+ }
+#endif
+}
+
+void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor)