- m_incomingCalls.begin()->unlink(*m_vm, repatchBuffer);
-}
-#endif // ENABLE(JIT)
-
-#if ENABLE(LLINT)
-Instruction* CodeBlock::adjustPCIfAtCallSite(Instruction* potentialReturnPC)
-{
- ASSERT(potentialReturnPC);
-
- unsigned returnPCOffset = potentialReturnPC - instructions().begin();
- Instruction* adjustedPC;
- unsigned opcodeLength;
-
- // If we are at a call site, the LLInt stores the PC after the call
- // instruction rather than the PC of the call instruction itself. This
- // requires some correcting. In that case we can rely on the fact that the
- // preceding instruction must be one of the call instructions: either
- // op_call_varargs, or one of op_call, op_construct, or op_call_eval.
- //
- // If we are not at a call site, then we need to guard against the
- // possibility of peeking past the start of the bytecode range for this
- // codeBlock. Hence, we do a bounds check before we peek at the
- // potential "preceding" instruction.
- // The bounds check is done by comparing the offset of the potential
- // returnPC with the length of the opcode. If there is room for a call
- // instruction before the returnPC, then the offset of the returnPC must
- // be at least the size of the call opcode we're looking for.
-
- // Determining which call instruction precedes the returnPC (if we are
- // at a call site) depends on the following assumptions. So, assert that
- // they still hold:
- ASSERT(OPCODE_LENGTH(op_call_varargs) <= OPCODE_LENGTH(op_call));
- ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
- ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
-
- // Check for the case of a preceding op_call_varargs:
- opcodeLength = OPCODE_LENGTH(op_call_varargs);
- adjustedPC = potentialReturnPC - opcodeLength;
- if ((returnPCOffset >= opcodeLength)
- && (adjustedPC->u.pointer == LLInt::getCodePtr(llint_op_call_varargs))) {
- return adjustedPC;
- }
-
- // Check for the case of the other 3 call instructions:
- opcodeLength = OPCODE_LENGTH(op_call);
- adjustedPC = potentialReturnPC - opcodeLength;
- if ((returnPCOffset >= opcodeLength)
- && (adjustedPC->u.pointer == LLInt::getCodePtr(llint_op_call)
- || adjustedPC->u.pointer == LLInt::getCodePtr(llint_op_construct)
- || adjustedPC->u.pointer == LLInt::getCodePtr(llint_op_call_eval))) {
- return adjustedPC;
- }
-
- // Not a call site. No need to adjust PC. Just return the original.
- return potentialReturnPC;
-}
-#endif // ENABLE(LLINT)
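// A minimal standalone sketch (not part of the patch above; all names are
// invented) of the adjustPCIfAtCallSite() idea: the interpreter leaves the
// PC pointing just past the call opcode, so we step back by that opcode's
// length when the slot that many entries earlier holds a call opcode, and
// we bounds-check first so we never peek before the start of the stream.
#include <cassert>
#include <cstddef>
#include <initializer_list>

namespace sketch {

enum class Op { Add, Call, CallVarargs, Return };

// Hypothetical opcode lengths, standing in for OPCODE_LENGTH() above.
inline size_t opcodeLength(Op op)
{
    switch (op) {
    case Op::CallVarargs:
        return 3;
    case Op::Call:
        return 4;
    default:
        return 1;
    }
}

// Returns the index of the call opcode if one immediately precedes the
// potential return PC; otherwise returns the index unchanged.
inline size_t adjustPCIfAtCallSite(const Op* stream, size_t length, size_t potentialReturnPC)
{
    assert(potentialReturnPC <= length);

    for (Op candidate : { Op::CallVarargs, Op::Call }) {
        size_t len = opcodeLength(candidate);
        // Bounds check: only peek backwards if a whole call opcode fits
        // between the start of the stream and the return PC.
        if (potentialReturnPC >= len && stream[potentialReturnPC - len] == candidate)
            return potentialReturnPC - len;
    }
    return potentialReturnPC; // Not a call site; no adjustment needed.
}

} // namespace sketch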
-
-#if ENABLE(JIT)
-ClosureCallStubRoutine* CodeBlock::findClosureCallForReturnPC(ReturnAddressPtr returnAddress)
-{
- for (unsigned i = m_callLinkInfos.size(); i--;) {
- CallLinkInfo& info = m_callLinkInfos[i];
- if (!info.stub)
- continue;
- if (!info.stub->code().executableMemory()->contains(returnAddress.value()))
- continue;
-
- RELEASE_ASSERT(info.stub->codeOrigin().bytecodeIndex < CodeOrigin::maximumBytecodeIndex);
- return info.stub.get();
- }
-
- // The stub routine may have been jettisoned. This is rare, but we have to handle it.
- const JITStubRoutineSet& set = m_vm->heap.jitStubRoutines();
- for (unsigned i = set.size(); i--;) {
- GCAwareJITStubRoutine* genericStub = set.at(i);
- if (!genericStub->isClosureCall())
- continue;
- ClosureCallStubRoutine* stub = static_cast<ClosureCallStubRoutine*>(genericStub);
- if (!stub->code().executableMemory()->contains(returnAddress.value()))
- continue;
- RELEASE_ASSERT(stub->codeOrigin().bytecodeIndex < CodeOrigin::maximumBytecodeIndex);
- return stub;
- }
-
- return 0;
-}
-#endif
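// A minimal sketch (invented types, not the JSC classes) of the lookup in
// findClosureCallForReturnPC() above: scan the known stub routines and
// return the one whose executable memory range contains the return
// address, treating containment as a half-open [start, end) check.
#include <cstdint>
#include <vector>

namespace sketch {

struct StubRange {
    uintptr_t start;
    uintptr_t end; // one past the last byte of the stub's code

    bool contains(uintptr_t address) const { return address >= start && address < end; }
};

// Returns the matching range, or nullptr if the return address does not
// fall inside any known stub (e.g. because it was jettisoned and the
// memory has already been reclaimed).
inline const StubRange* findStubForReturnAddress(const std::vector<StubRange>& stubs, uintptr_t returnAddress)
{
    for (size_t i = stubs.size(); i--;) {
        if (stubs[i].contains(returnAddress))
            return &stubs[i];
    }
    return nullptr;
}

} // namespace sketch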
-
-unsigned CodeBlock::bytecodeOffset(ExecState* exec, ReturnAddressPtr returnAddress)
-{
- UNUSED_PARAM(exec);
- UNUSED_PARAM(returnAddress);
-#if ENABLE(LLINT)
-#if !ENABLE(LLINT_C_LOOP)
- // When using the JIT, we could have addresses that are not bytecode
- // addresses. We check whether the return address is in the range of the
- // LLInt glue and opcode handlers here to ensure that we are looking at
- // bytecode before attempting to convert the return address into a
- // bytecode offset.
- //
- // In the case of the C Loop LLInt, the JIT is disabled, and the only
- // valid return addresses should be bytecode PCs. So, we can and must
- // forgo this check: when we do not ENABLE(COMPUTED_GOTO_OPCODES), the
- // bytecode "PC"s are actually opcodeIDs and are not bounded by
- // llint_begin and llint_end.
- if (returnAddress.value() >= LLInt::getCodePtr(llint_begin)
- && returnAddress.value() <= LLInt::getCodePtr(llint_end))
-#endif
- {
- RELEASE_ASSERT(exec->codeBlock());
- RELEASE_ASSERT(exec->codeBlock() == this);
- RELEASE_ASSERT(JITCode::isBaselineCode(getJITType()));
- Instruction* instruction = exec->currentVPC();
- RELEASE_ASSERT(instruction);
-
- instruction = adjustPCIfAtCallSite(instruction);
- return bytecodeOffset(instruction);
- }
-#endif // ENABLE(LLINT)
-
-#if ENABLE(JIT)
- if (!m_rareData)
- return 1;
- Vector<CallReturnOffsetToBytecodeOffset, 0, UnsafeVectorOverflow>& callIndices = m_rareData->m_callReturnIndexVector;
- if (!callIndices.size())
- return 1;
-
- if (getJITCode().getExecutableMemory()->contains(returnAddress.value())) {
- unsigned callReturnOffset = getJITCode().offsetOf(returnAddress.value());
- CallReturnOffsetToBytecodeOffset* result =
- binarySearch<CallReturnOffsetToBytecodeOffset, unsigned>(
- callIndices, callIndices.size(), callReturnOffset, getCallReturnOffset);
- RELEASE_ASSERT(result->callReturnOffset == callReturnOffset);
- RELEASE_ASSERT(result->bytecodeOffset < instructionCount());
- return result->bytecodeOffset;
- }
- ClosureCallStubRoutine* closureInfo = findClosureCallForReturnPC(returnAddress);
- RELEASE_ASSERT(closureInfo); // findClosureCallForReturnPC() returns 0 if the stub was jettisoned and reclaimed.
- CodeOrigin origin = closureInfo->codeOrigin();
- while (InlineCallFrame* inlineCallFrame = origin.inlineCallFrame) {
- if (inlineCallFrame->baselineCodeBlock() == this)
- break;
- origin = inlineCallFrame->caller;
- RELEASE_ASSERT(origin.bytecodeIndex < CodeOrigin::maximumBytecodeIndex);
- }
- RELEASE_ASSERT(origin.bytecodeIndex < CodeOrigin::maximumBytecodeIndex);
- unsigned bytecodeIndex = origin.bytecodeIndex;
- RELEASE_ASSERT(bytecodeIndex < instructionCount());
- return bytecodeIndex;
-#endif // ENABLE(JIT)
-
-#if !ENABLE(LLINT) && !ENABLE(JIT)
- return 1;
-#endif
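// A minimal sketch (assumed names and containers, not WTF::binarySearch) of
// the JIT path in bytecodeOffset() above: the call return offsets recorded
// by the JIT are kept sorted, so mapping a return address back to a
// bytecode offset is a binary search for an exact callReturnOffset match.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

namespace sketch {

struct CallReturnOffsetToBytecodeOffset {
    uint32_t callReturnOffset;
    uint32_t bytecodeOffset;
};

inline uint32_t bytecodeOffsetForCallReturnOffset(
    const std::vector<CallReturnOffsetToBytecodeOffset>& callIndices,
    uint32_t callReturnOffset)
{
    auto it = std::lower_bound(
        callIndices.begin(), callIndices.end(), callReturnOffset,
        [](const CallReturnOffsetToBytecodeOffset& entry, uint32_t offset) {
            return entry.callReturnOffset < offset;
        });
    // Like the RELEASE_ASSERT above, we expect an exact match: every call
    // return offset the JIT emits has a recorded bytecode offset.
    assert(it != callIndices.end() && it->callReturnOffset == callReturnOffset);
    return it->bytecodeOffset;
}

} // namespace sketch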