+// Roll a call site back to its unlinked state: restore the patchable branch on the
+// fast path, relink the slow path to the thunk produced by the given generator, and
+// clear the CallLinkInfo so the site can be linked again from scratch.
+static void revertCall(
+ RepatchBuffer& repatchBuffer, VM* vm, CallLinkInfo& callLinkInfo, ThunkGenerator generator)
+{
+ repatchBuffer.revertJumpReplacementToBranchPtrWithPatch(
+ RepatchBuffer::startOfBranchPtrWithPatchOnRegister(callLinkInfo.hotPathBegin()),
+ static_cast<MacroAssembler::RegisterID>(callLinkInfo.calleeGPR()), 0);
+ linkSlowFor(repatchBuffer, vm, callLinkInfo, generator);
+ callLinkInfo.clearSeen();
+ callLinkInfo.clearCallee();
+ callLinkInfo.clearStub();
+ if (callLinkInfo.isOnList())
+ callLinkInfo.remove();
+}
+
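+// Unlink a call site entirely: revert it to the generic link-call thunk for the given
+// specialization kind and register preservation mode, so the next invocation re-enters
+// the slow-path linking machinery.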
+void unlinkFor(
+ RepatchBuffer& repatchBuffer, CallLinkInfo& callLinkInfo,
+ CodeSpecializationKind kind, RegisterPreservationMode registers)
+{
+ if (Options::showDisassembly())
+ dataLog("Unlinking call from ", callLinkInfo.callReturnLocation(), " in request from ", pointerDump(repatchBuffer.codeBlock()), "\n");
+
+ revertCall(
+ repatchBuffer, repatchBuffer.codeBlock()->vm(), callLinkInfo,
+ linkThunkGeneratorFor(kind, registers));
+}
+
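+// Stop trying to link this call site directly and point it at the shared virtual call
+// thunk, which loads and dispatches on the callee at runtime.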
+void linkVirtualFor(
+ ExecState* exec, CallLinkInfo& callLinkInfo,
+ CodeSpecializationKind kind, RegisterPreservationMode registers)
+{
+ // FIXME: We could generate a virtual call stub here. This would lead to faster virtual calls
+ // by eliminating the branch prediction bottleneck inside the shared virtual call thunk.
+
+ CodeBlock* callerCodeBlock = exec->callerFrame()->codeBlock();
+ VM* vm = callerCodeBlock->vm();
+
+ if (shouldShowDisassemblyFor(callerCodeBlock))
+ dataLog("Linking virtual call at ", *callerCodeBlock, " ", exec->callerFrame()->codeOrigin(), "\n");
+
+ RepatchBuffer repatchBuffer(callerCodeBlock);
+ revertCall(repatchBuffer, vm, callLinkInfo, virtualThunkGeneratorFor(kind, registers));
+}
+
+namespace {
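+// Associates a call emitted while building a polymorphic call stub with the code
+// pointer it should be linked to once the stub is finalized.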
+struct CallToCodePtr {
+ CCallHelpers::Call call;
+ MacroAssemblerCodePtr codePtr;
+};
+} // anonymous namespace
+
+void linkPolymorphicCall(
+ ExecState* exec, CallLinkInfo& callLinkInfo, CallVariant newVariant,