/*
- * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2012, 2013 Apple Inc. All rights reserved.
* Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
*
* Redistribution and use in source and binary forms, with or without
#include "JIT.h"
#include "Arguments.h"
-#include "JITInlineMethods.h"
-#include "JITStubCall.h"
+#include "CopiedSpaceInlines.h"
+#include "Debugger.h"
+#include "Heap.h"
+#include "JITInlines.h"
#include "JSArray.h"
#include "JSCell.h"
#include "JSFunction.h"
#include "JSPropertyNameIterator.h"
-#include "LinkBuffer.h"
+#include "MaxFrameExtentForSlowPathCall.h"
+#include "SlowPathCall.h"
+#include "VirtualRegister.h"
namespace JSC {
#if USE(JSVALUE64)
-void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, TrampolineStructure *trampolines)
+JIT::CodeRef JIT::privateCompileCTINativeCall(VM* vm, NativeFunction)
{
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- // (2) The second function provides fast property access for string length
- Label stringLengthBegin = align();
-
- // Check eax is a string
- Jump string_failureCases1 = emitJumpIfNotJSCell(regT0);
- Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr));
-
- // Checks out okay! - get the length from the Ustring.
- load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_length)), regT0);
-
- Jump string_failureCases3 = branch32(LessThan, regT0, TrustedImm32(0));
-
- // regT0 contains a 64 bit value (is positive, is zero extended) so we don't need sign extend here.
- emitFastArithIntToImmNoCheck(regT0, regT0);
-
- ret();
-#endif
-
- // (3) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
- COMPILE_ASSERT(sizeof(CodeType) == 4, CodeTypeEnumMustBe32Bit);
-
- // VirtualCallLink Trampoline
- // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
- JumpList callLinkFailures;
- Label virtualCallLinkBegin = align();
- compileOpCallInitializeCallFrame();
- preserveReturnAddressAfterCall(regT3);
- emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
- restoreArgumentReference();
- Call callLazyLinkCall = call();
- callLinkFailures.append(branchTestPtr(Zero, regT0));
- restoreReturnAddressBeforeReturn(regT3);
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
- jump(regT0);
-
- // VirtualConstructLink Trampoline
- // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
- Label virtualConstructLinkBegin = align();
- compileOpCallInitializeCallFrame();
- preserveReturnAddressAfterCall(regT3);
- emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
- restoreArgumentReference();
- Call callLazyLinkConstruct = call();
- callLinkFailures.append(branchTestPtr(Zero, regT0));
- restoreReturnAddressBeforeReturn(regT3);
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
- jump(regT0);
-
- // VirtualCall Trampoline
- // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
- Label virtualCallBegin = align();
- compileOpCallInitializeCallFrame();
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
-
- Jump hasCodeBlock3 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), TrustedImm32(0));
- preserveReturnAddressAfterCall(regT3);
- restoreArgumentReference();
- Call callCompileCall = call();
- callLinkFailures.append(branchTestPtr(Zero, regT0));
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
- restoreReturnAddressBeforeReturn(regT3);
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- hasCodeBlock3.link(this);
-
- loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForCallWithArityCheck)), regT0);
- jump(regT0);
-
- // VirtualConstruct Trampoline
- // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
- Label virtualConstructBegin = align();
- compileOpCallInitializeCallFrame();
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
-
- Jump hasCodeBlock4 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), TrustedImm32(0));
- preserveReturnAddressAfterCall(regT3);
- restoreArgumentReference();
- Call callCompileConstruct = call();
- callLinkFailures.append(branchTestPtr(Zero, regT0));
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
- restoreReturnAddressBeforeReturn(regT3);
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- hasCodeBlock4.link(this);
-
- loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForConstructWithArityCheck)), regT0);
- jump(regT0);
-
- // If the parser fails we want to be able to be able to keep going,
- // So we handle this as a parse failure.
- callLinkFailures.link(this);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
- restoreReturnAddressBeforeReturn(regT1);
- move(TrustedImmPtr(&globalData->exceptionLocation), regT2);
- storePtr(regT1, regT2);
- poke(callFrameRegister, 1 + OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
- poke(TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()));
- ret();
-
- // NativeCall Trampoline
- Label nativeCallThunk = privateCompileCTINativeCall(globalData);
- Label nativeConstructThunk = privateCompileCTINativeCall(globalData, true);
-
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
- Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
- Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
-#endif
-
- // All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
- LinkBuffer patchBuffer(*m_globalData, this, m_globalData->executableAllocator);
-
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
- patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
- patchBuffer.link(string_failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
-#endif
-#if ENABLE(JIT_OPTIMIZE_CALL)
- patchBuffer.link(callLazyLinkCall, FunctionPtr(cti_vm_lazyLinkCall));
- patchBuffer.link(callLazyLinkConstruct, FunctionPtr(cti_vm_lazyLinkConstruct));
-#endif
- patchBuffer.link(callCompileCall, FunctionPtr(cti_op_call_jitCompile));
- patchBuffer.link(callCompileConstruct, FunctionPtr(cti_op_construct_jitCompile));
-
- CodeRef finalCode = patchBuffer.finalizeCode();
- *executablePool = finalCode.m_executablePool;
-
- trampolines->ctiVirtualCallLink = patchBuffer.trampolineAt(virtualCallLinkBegin);
- trampolines->ctiVirtualConstructLink = patchBuffer.trampolineAt(virtualConstructLinkBegin);
- trampolines->ctiVirtualCall = patchBuffer.trampolineAt(virtualCallBegin);
- trampolines->ctiVirtualConstruct = patchBuffer.trampolineAt(virtualConstructBegin);
- trampolines->ctiNativeCall = patchBuffer.trampolineAt(nativeCallThunk);
- trampolines->ctiNativeConstruct = patchBuffer.trampolineAt(nativeConstructThunk);
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- trampolines->ctiStringLengthTrampoline = patchBuffer.trampolineAt(stringLengthBegin);
-#endif
+ return vm->getCTIStub(nativeCallGenerator);
}
-JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isConstruct)
+void JIT::emit_op_mov(Instruction* currentInstruction)
{
- int executableOffsetToFunction = isConstruct ? OBJECT_OFFSETOF(NativeExecutable, m_constructor) : OBJECT_OFFSETOF(NativeExecutable, m_function);
-
- Label nativeCallThunk = align();
-
- emitPutImmediateToCallFrameHeader(0, RegisterFile::CodeBlock);
-
-#if CPU(X86_64)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
- emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
-
- peek(regT1);
- emitPutToCallFrameHeader(regT1, RegisterFile::ReturnPC);
-
- // Calling convention: f(edi, esi, edx, ecx, ...);
- // Host function signature: f(ExecState*);
- move(callFrameRegister, X86Registers::edi);
-
- subPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.
-
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::esi);
- loadPtr(Address(X86Registers::esi, OBJECT_OFFSETOF(JSFunction, m_executable)), X86Registers::r9);
- move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
- call(Address(X86Registers::r9, executableOffsetToFunction));
-
- addPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister);
-
-#elif CPU(ARM)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT2);
- emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
-
- preserveReturnAddressAfterCall(regT3); // Callee preserved
- emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
-
- // Calling convention: f(r0 == regT0, r1 == regT1, ...);
- // Host function signature: f(ExecState*);
- move(callFrameRegister, ARMRegisters::r0);
-
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, ARMRegisters::r1);
- move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
- loadPtr(Address(ARMRegisters::r1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- call(Address(regT2, executableOffsetToFunction));
-
- restoreReturnAddressBeforeReturn(regT3);
-
-#elif CPU(MIPS)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
- emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
-
- preserveReturnAddressAfterCall(regT3); // Callee preserved
- emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
-
- // Calling convention: f(a0, a1, a2, a3);
- // Host function signature: f(ExecState*);
-
- // Allocate stack space for 16 bytes (8-byte aligned)
- // 16 bytes (unused) for 4 arguments
- subPtr(TrustedImm32(16), stackPointerRegister);
-
- // Setup arg0
- move(callFrameRegister, MIPSRegisters::a0);
-
- // Call
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, MIPSRegisters::a2);
- loadPtr(Address(MIPSRegisters::a2, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
- call(Address(regT2, executableOffsetToFunction));
-
- // Restore stack space
- addPtr(TrustedImm32(16), stackPointerRegister);
-
- restoreReturnAddressBeforeReturn(regT3);
-
-#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
-#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
-#else
- UNUSED_PARAM(executableOffsetToFunction);
- breakpoint();
-#endif
-
- // Check for an exception
- loadPtr(&(globalData->exception), regT2);
- Jump exceptionHandler = branchTestPtr(NonZero, regT2);
-
- // Return.
- ret();
-
- // Handle an exception
- exceptionHandler.link(this);
-
- // Grab the return address.
- preserveReturnAddressAfterCall(regT1);
-
- move(TrustedImmPtr(&globalData->exceptionLocation), regT2);
- storePtr(regT1, regT2);
- poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
-
- // Set the return address.
- move(TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT1);
- restoreReturnAddressBeforeReturn(regT1);
-
- ret();
-
- return nativeCallThunk;
-}
+ int dst = currentInstruction[1].u.operand;
+ int src = currentInstruction[2].u.operand;
-JIT::CodePtr JIT::privateCompileCTINativeCall(PassRefPtr<ExecutablePool>, JSGlobalData* globalData, NativeFunction)
-{
- return globalData->jitStubs->ctiNativeCall();
+ emitGetVirtualRegister(src, regT0);
+ emitPutVirtualRegister(dst);
}
-void JIT::emit_op_mov(Instruction* currentInstruction)
+void JIT::emit_op_captured_mov(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
int src = currentInstruction[2].u.operand;
- if (m_codeBlock->isConstantRegisterIndex(src)) {
- storePtr(ImmPtr(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
- if (dst == m_lastResultBytecodeRegister)
- killLastResultRegister();
- } else if ((src == m_lastResultBytecodeRegister) || (dst == m_lastResultBytecodeRegister)) {
- // If either the src or dst is the cached register go though
- // get/put registers to make sure we track this correctly.
- emitGetVirtualRegister(src, regT0);
- emitPutVirtualRegister(dst);
- } else {
- // Perform the copy via regT1; do not disturb any mapping in regT0.
- loadPtr(Address(callFrameRegister, src * sizeof(Register)), regT1);
- storePtr(regT1, Address(callFrameRegister, dst * sizeof(Register)));
- }
+ emitGetVirtualRegister(src, regT0);
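+ // Notify the captured variable's watchpoint set of the write before storing the new value.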
+ emitNotifyWrite(regT0, regT1, currentInstruction[3].u.watchpointSet);
+ emitPutVirtualRegister(dst);
}
void JIT::emit_op_end(Instruction* currentInstruction)
{
- ASSERT(returnValueRegister != callFrameRegister);
- emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
- restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
+ RELEASE_ASSERT(returnValueGPR != callFrameRegister);
+ emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueGPR);
+ emitFunctionEpilogue();
ret();
}
addJump(jump(), target);
}
-void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
+void JIT::emit_op_new_object(Instruction* currentInstruction)
{
- emitTimeoutCheck();
+ Structure* structure = currentInstruction[3].u.objectAllocationProfile->structure();
+ size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
+ MarkedAllocator* allocator = &m_vm->heap.allocatorForObjectWithoutDestructor(allocationSize);
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
- if (isOperandConstantImmediateInt(op2)) {
- emitGetVirtualRegister(op1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- int32_t op2imm = getConstantOperandImmediateInt(op2);
- addJump(branch32(LessThanOrEqual, regT0, Imm32(op2imm)), target);
- } else {
- emitGetVirtualRegisters(op1, regT0, op2, regT1);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT1);
- addJump(branch32(LessThanOrEqual, regT0, regT1), target);
- }
+ RegisterID resultReg = regT0;
+ RegisterID allocatorReg = regT1;
+ RegisterID scratchReg = regT2;
+
+ move(TrustedImmPtr(allocator), allocatorReg);
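+ // Inline-allocate the JSFinalObject from the cached allocator; allocation failure takes the slow case handled by emitSlow_op_new_object below.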
+ emitAllocateJSObject(allocatorReg, TrustedImmPtr(structure), resultReg, scratchReg);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
}
-void JIT::emit_op_new_object(Instruction* currentInstruction)
+void JIT::emitSlow_op_new_object(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- JITStubCall(this, cti_op_new_object).call(currentInstruction[1].u.operand);
+ linkSlowCase(iter);
+ int dst = currentInstruction[1].u.operand;
+ Structure* structure = currentInstruction[3].u.objectAllocationProfile->structure();
+ callOperation(operationNewObject, structure);
+ emitStoreCell(dst, returnValueGPR);
}
void JIT::emit_op_check_has_instance(Instruction* currentInstruction)
{
- unsigned baseVal = currentInstruction[1].u.operand;
+ int baseVal = currentInstruction[3].u.operand;
emitGetVirtualRegister(baseVal, regT0);
emitJumpSlowCaseIfNotJSCell(regT0, baseVal);
// Check that baseVal 'ImplementsHasInstance'.
- loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
- addSlowCase(branchTest8(Zero, Address(regT0, Structure::typeInfoFlagsOffset()), TrustedImm32(ImplementsHasInstance)));
+ addSlowCase(branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance)));
}
void JIT::emit_op_instanceof(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned value = currentInstruction[2].u.operand;
- unsigned baseVal = currentInstruction[3].u.operand;
- unsigned proto = currentInstruction[4].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
+ int proto = currentInstruction[3].u.operand;
// Load the operands (baseVal, proto, and value respectively) into registers.
// We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
emitGetVirtualRegister(value, regT2);
- emitGetVirtualRegister(baseVal, regT0);
emitGetVirtualRegister(proto, regT1);
// Check that proto are cells. baseVal must be a cell - this is checked by op_check_has_instance.
emitJumpSlowCaseIfNotJSCell(regT1, proto);
// Check that prototype is an object
- loadPtr(Address(regT1, JSCell::structureOffset()), regT3);
- addSlowCase(branch8(NotEqual, Address(regT3, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType)));
+ addSlowCase(emitJumpIfCellNotObject(regT1));
- // Fixme: this check is only needed because the JSC API allows HasInstance to be overridden; we should deprecate this.
- // Check that baseVal 'ImplementsDefaultHasInstance'.
- loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
- addSlowCase(branchTest8(Zero, Address(regT0, Structure::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance)));
-
// Optimistically load the result true, and start looping.
// Initially, regT1 still contains proto and regT2 still contains value.
// As we loop regT2 will be updated with its prototype, recursively walking the prototype chain.
- move(TrustedImmPtr(JSValue::encode(jsBoolean(true))), regT0);
+ move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0);
Label loop(this);
// Load the prototype of the object in regT2. If this is equal to regT1 - WIN!
// Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
- loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
- loadPtr(Address(regT2, Structure::prototypeOffset()), regT2);
+ emitLoadStructure(regT2, regT2, regT3);
+ load64(Address(regT2, Structure::prototypeOffset()), regT2);
Jump isInstance = branchPtr(Equal, regT2, regT1);
emitJumpIfJSCell(regT2).linkTo(loop, this);
// We get here either by dropping out of the loop, or if value was not an Object. Result is false.
- move(TrustedImmPtr(JSValue::encode(jsBoolean(false))), regT0);
+ move(TrustedImm64(JSValue::encode(jsBoolean(false))), regT0);
// isInstance jumps right down to here, to skip setting the result to false (it has already set true).
isInstance.link(this);
emitPutVirtualRegister(dst);
}
-void JIT::emit_op_call(Instruction* currentInstruction)
-{
- compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
-}
-
-void JIT::emit_op_call_eval(Instruction* currentInstruction)
-{
- compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex++);
-}
-
-void JIT::emit_op_call_varargs(Instruction* currentInstruction)
-{
- compileOpCallVarargs(currentInstruction);
-}
-
-void JIT::emit_op_construct(Instruction* currentInstruction)
+void JIT::emit_op_is_undefined(Instruction* currentInstruction)
{
- compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
-}
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(value, regT0);
+ Jump isCell = emitJumpIfJSCell(regT0);
-void JIT::emit_op_get_global_var(Instruction* currentInstruction)
-{
- JSVariableObject* globalObject = m_codeBlock->globalObject();
- loadPtr(&globalObject->m_registers, regT0);
- loadPtr(Address(regT0, currentInstruction[2].u.operand * sizeof(Register)), regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
+ compare64(Equal, regT0, TrustedImm32(ValueUndefined), regT0);
+ Jump done = jump();
+
+ isCell.link(this);
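+ // A cell only reads as undefined if it masquerades as undefined and belongs to this code block's global object.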
+ Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ move(TrustedImm32(0), regT0);
+ Jump notMasqueradesAsUndefined = jump();
+
+ isMasqueradesAsUndefined.link(this);
+ emitLoadStructure(regT0, regT1, regT2);
+ move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
+ loadPtr(Address(regT1, Structure::globalObjectOffset()), regT1);
+ comparePtr(Equal, regT0, regT1, regT0);
+
+ notMasqueradesAsUndefined.link(this);
+ done.link(this);
+ emitTagAsBoolImmediate(regT0);
+ emitPutVirtualRegister(dst);
}
-void JIT::emit_op_put_global_var(Instruction* currentInstruction)
+void JIT::emit_op_is_boolean(Instruction* currentInstruction)
{
- emitGetVirtualRegister(currentInstruction[2].u.operand, regT1);
- JSVariableObject* globalObject = m_codeBlock->globalObject();
- loadPtr(&globalObject->m_registers, regT0);
- storePtr(regT1, Address(regT0, currentInstruction[1].u.operand * sizeof(Register)));
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(value, regT0);
+ xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
+ test64(Zero, regT0, TrustedImm32(static_cast<int32_t>(~1)), regT0);
+ emitTagAsBoolImmediate(regT0);
+ emitPutVirtualRegister(dst);
}
-void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
+void JIT::emit_op_is_number(Instruction* currentInstruction)
{
- int skip = currentInstruction[3].u.operand;
-
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0);
- bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
- ASSERT(skip || !checkTopLevel);
- if (checkTopLevel && skip--) {
- Jump activationNotCreated;
- if (checkTopLevel)
- activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
- activationNotCreated.link(this);
- }
- while (skip--)
- loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, object)), regT0);
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSVariableObject, m_registers)), regT0);
- loadPtr(Address(regT0, currentInstruction[2].u.operand * sizeof(Register)), regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(value, regT0);
+ test64(NonZero, regT0, tagTypeNumberRegister, regT0);
+ emitTagAsBoolImmediate(regT0);
+ emitPutVirtualRegister(dst);
}
-void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
+void JIT::emit_op_is_string(Instruction* currentInstruction)
{
- int skip = currentInstruction[2].u.operand;
-
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1);
- emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
- bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
- ASSERT(skip || !checkTopLevel);
- if (checkTopLevel && skip--) {
- Jump activationNotCreated;
- if (checkTopLevel)
- activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
- loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, next)), regT1);
- activationNotCreated.link(this);
- }
- while (skip--)
- loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, next)), regT1);
-
- loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, object)), regT1);
- loadPtr(Address(regT1, OBJECT_OFFSETOF(JSVariableObject, m_registers)), regT1);
- storePtr(regT0, Address(regT1, currentInstruction[1].u.operand * sizeof(Register)));
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(value, regT0);
+ Jump isNotCell = emitJumpIfNotJSCell(regT0);
+
+ compare8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType), regT0);
+ emitTagAsBoolImmediate(regT0);
+ Jump done = jump();
+
+ isNotCell.link(this);
+ move(TrustedImm32(ValueFalse), regT0);
+
+ done.link(this);
+ emitPutVirtualRegister(dst);
}
void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
{
- unsigned activation = currentInstruction[1].u.operand;
- unsigned arguments = currentInstruction[2].u.operand;
- Jump activationCreated = branchTestPtr(NonZero, addressFor(activation));
- Jump argumentsNotCreated = branchTestPtr(Zero, addressFor(arguments));
- activationCreated.link(this);
- JITStubCall stubCall(this, cti_op_tear_off_activation);
- stubCall.addArgument(activation, regT2);
- stubCall.addArgument(unmodifiedArgumentsRegister(arguments), regT2);
- stubCall.call();
- argumentsNotCreated.link(this);
+ int activation = currentInstruction[1].u.operand;
+ Jump activationNotCreated = branchTest64(Zero, addressFor(activation));
+ emitGetVirtualRegister(activation, regT0);
+ callOperation(operationTearOffActivation, regT0);
+ activationNotCreated.link(this);
}
void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
+ int arguments = currentInstruction[1].u.operand;
+ int activation = currentInstruction[2].u.operand;
- Jump argsNotCreated = branchTestPtr(Zero, Address(callFrameRegister, sizeof(Register) * (unmodifiedArgumentsRegister(dst))));
- JITStubCall stubCall(this, cti_op_tear_off_arguments);
- stubCall.addArgument(unmodifiedArgumentsRegister(dst), regT2);
- stubCall.call();
+ Jump argsNotCreated = branchTest64(Zero, Address(callFrameRegister, sizeof(Register) * (unmodifiedArgumentsRegister(VirtualRegister(arguments)).offset())));
+ emitGetVirtualRegister(unmodifiedArgumentsRegister(VirtualRegister(arguments)).offset(), regT0);
+ emitGetVirtualRegister(activation, regT1);
+ callOperation(operationTearOffArguments, regT0, regT1);
argsNotCreated.link(this);
}
void JIT::emit_op_ret(Instruction* currentInstruction)
{
ASSERT(callFrameRegister != regT1);
- ASSERT(regT1 != returnValueRegister);
- ASSERT(returnValueRegister != callFrameRegister);
+ ASSERT(regT1 != returnValueGPR);
+ ASSERT(returnValueGPR != callFrameRegister);
// Return the result in %eax.
- emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
-
- // Grab the return address.
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+ emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueGPR);
- // Restore our caller's "r".
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
-
- // Return.
- restoreReturnAddressBeforeReturn(regT1);
+ checkStackPointerAlignment();
+ emitFunctionEpilogue();
ret();
}
void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
{
ASSERT(callFrameRegister != regT1);
- ASSERT(regT1 != returnValueRegister);
- ASSERT(returnValueRegister != callFrameRegister);
+ ASSERT(regT1 != returnValueGPR);
+ ASSERT(returnValueGPR != callFrameRegister);
// Return the result in %eax.
- emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
- Jump notJSCell = emitJumpIfNotJSCell(returnValueRegister);
- loadPtr(Address(returnValueRegister, JSCell::structureOffset()), regT2);
- Jump notObject = branch8(NotEqual, Address(regT2, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType));
-
- // Grab the return address.
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
-
- // Restore our caller's "r".
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+ emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueGPR);
+ Jump notJSCell = emitJumpIfNotJSCell(returnValueGPR);
+ Jump notObject = emitJumpIfCellNotObject(returnValueGPR);
// Return.
- restoreReturnAddressBeforeReturn(regT1);
+ emitFunctionEpilogue();
ret();
// Return 'this' in %eax.
notJSCell.link(this);
notObject.link(this);
- emitGetVirtualRegister(currentInstruction[2].u.operand, returnValueRegister);
-
- // Grab the return address.
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
-
- // Restore our caller's "r".
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+ emitGetVirtualRegister(currentInstruction[2].u.operand, returnValueGPR);
// Return.
- restoreReturnAddressBeforeReturn(regT1);
+ emitFunctionEpilogue();
ret();
}
-void JIT::emit_op_resolve(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
void JIT::emit_op_to_primitive(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
emitGetVirtualRegister(src, regT0);
Jump isImm = emitJumpIfNotJSCell(regT0);
- addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr)));
+ addSlowCase(branchStructure(NotEqual,
+ Address(regT0, JSCell::structureIDOffset()),
+ m_vm->stringStructure.get()));
isImm.link(this);
if (dst != src)
void JIT::emit_op_strcat(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_strcat);
- stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
- stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_resolve_base(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, currentInstruction[3].u.operand ? cti_op_resolve_base_strict_put : cti_op_resolve_base);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_ensure_property_exists(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_ensure_property_exists);
- stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve_skip);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_resolve_global(Instruction* currentInstruction, bool)
-{
- // Fast case
- void* globalObject = m_codeBlock->globalObject();
- unsigned currentIndex = m_globalResolveInfoIndex++;
- GlobalResolveInfo* resolveInfoAddress = &(m_codeBlock->globalResolveInfo(currentIndex));
-
- // Check Structure of global object
- move(TrustedImmPtr(globalObject), regT0);
- move(TrustedImmPtr(resolveInfoAddress), regT2);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(GlobalResolveInfo, structure)), regT1);
- addSlowCase(branchPtr(NotEqual, regT1, Address(regT0, JSCell::structureOffset()))); // Structures don't match
-
- // Load cached property
- // Assume that the global object always uses external storage.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSGlobalObject, m_propertyStorage)), regT0);
- load32(Address(regT2, OBJECT_OFFSETOF(GlobalResolveInfo, offset)), regT1);
- loadPtr(BaseIndex(regT0, regT1, ScalePtr), regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-}
-
-void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- Identifier* ident = &m_codeBlock->identifier(currentInstruction[2].u.operand);
-
- unsigned currentIndex = m_globalResolveInfoIndex++;
-
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_resolve_global);
- stubCall.addArgument(TrustedImmPtr(ident));
- stubCall.addArgument(Imm32(currentIndex));
- stubCall.addArgument(regT0);
- stubCall.call(dst);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_strcat);
+ slowPathCall.call();
}
void JIT::emit_op_not(Instruction* currentInstruction)
// Invert against JSValue(false); if the value was tagged as a boolean, then all bits will be
// clear other than the low bit (which will be 0 or 1 for false or true inputs respectively).
// Then invert against JSValue(true), which will add the tag back in, and flip the low bit.
- xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
+ xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
addSlowCase(branchTestPtr(NonZero, regT0, TrustedImm32(static_cast<int32_t>(~1))));
- xorPtr(TrustedImm32(static_cast<int32_t>(ValueTrue)), regT0);
+ xor64(TrustedImm32(static_cast<int32_t>(ValueTrue)), regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
unsigned target = currentInstruction[2].u.operand;
emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
- addJump(branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(jsNumber(0)))), target);
+ addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNumber(0)))), target);
Jump isNonZero = emitJumpIfImmediateInteger(regT0);
- addJump(branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(jsBoolean(false)))), target);
- addSlowCase(branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(jsBoolean(true)))));
+ addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsBoolean(false)))), target);
+ addSlowCase(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsBoolean(true)))));
isNonZero.link(this);
}
void JIT::emit_op_jeq_null(Instruction* currentInstruction)
{
- unsigned src = currentInstruction[1].u.operand;
+ int src = currentInstruction[1].u.operand;
unsigned target = currentInstruction[2].u.operand;
emitGetVirtualRegister(src, regT0);
Jump isImmediate = emitJumpIfNotJSCell(regT0);
// First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- addJump(branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
- Jump wasNotImmediate = jump();
+ Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ emitLoadStructure(regT0, regT2, regT1);
+ move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
+ addJump(branchPtr(Equal, Address(regT2, Structure::globalObjectOffset()), regT0), target);
+ Jump masqueradesGlobalObjectIsForeign = jump();
// Now handle the immediate cases - undefined & null
isImmediate.link(this);
- andPtr(TrustedImm32(~TagBitUndefined), regT0);
- addJump(branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(jsNull()))), target);
+ and64(TrustedImm32(~TagBitUndefined), regT0);
+ addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNull()))), target);
- wasNotImmediate.link(this);
+ isNotMasqueradesAsUndefined.link(this);
+ masqueradesGlobalObjectIsForeign.link(this);
};
void JIT::emit_op_jneq_null(Instruction* currentInstruction)
{
- unsigned src = currentInstruction[1].u.operand;
+ int src = currentInstruction[1].u.operand;
unsigned target = currentInstruction[2].u.operand;
emitGetVirtualRegister(src, regT0);
Jump isImmediate = emitJumpIfNotJSCell(regT0);
// First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- addJump(branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
+ addJump(branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
+ emitLoadStructure(regT0, regT2, regT1);
+ move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
+ addJump(branchPtr(NotEqual, Address(regT2, Structure::globalObjectOffset()), regT0), target);
Jump wasNotImmediate = jump();
// Now handle the immediate cases - undefined & null
isImmediate.link(this);
- andPtr(TrustedImm32(~TagBitUndefined), regT0);
- addJump(branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(jsNull()))), target);
+ and64(TrustedImm32(~TagBitUndefined), regT0);
+ addJump(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsNull()))), target);
wasNotImmediate.link(this);
}
void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
{
- unsigned src = currentInstruction[1].u.operand;
- JSCell* ptr = currentInstruction[2].u.jsCell.get();
+ int src = currentInstruction[1].u.operand;
+ Special::Pointer ptr = currentInstruction[2].u.specialPointer;
unsigned target = currentInstruction[3].u.operand;
emitGetVirtualRegister(src, regT0);
- addJump(branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(JSValue(ptr)))), target);
-}
-
-void JIT::emit_op_jsr(Instruction* currentInstruction)
-{
- int retAddrDst = currentInstruction[1].u.operand;
- int target = currentInstruction[2].u.operand;
- DataLabelPtr storeLocation = storePtrWithPatch(TrustedImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
- addJump(jump(), target);
- m_jsrSites.append(JSRInfo(storeLocation, label()));
- killLastResultRegister();
-}
-
-void JIT::emit_op_sret(Instruction* currentInstruction)
-{
- jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand));
- killLastResultRegister();
+ addJump(branchPtr(NotEqual, regT0, TrustedImmPtr(actualPointerFor(m_codeBlock, ptr))), target);
}
void JIT::emit_op_eq(Instruction* currentInstruction)
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
-void JIT::emit_op_bitnot(Instruction* currentInstruction)
-{
- emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- not32(regT0);
- emitFastArithIntToImmNoCheck(regT0, regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve_with_base);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
- stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
- stubCall.call(currentInstruction[2].u.operand);
-}
-
-void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_new_func_exp);
- stubCall.addArgument(TrustedImmPtr(m_codeBlock->functionExpr(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
void JIT::emit_op_jtrue(Instruction* currentInstruction)
{
unsigned target = currentInstruction[2].u.operand;
emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
- Jump isZero = branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(jsNumber(0))));
+ Jump isZero = branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNumber(0))));
addJump(emitJumpIfImmediateInteger(regT0), target);
- addJump(branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(jsBoolean(true)))), target);
- addSlowCase(branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(jsBoolean(false)))));
+ addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsBoolean(true)))), target);
+ addSlowCase(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsBoolean(false)))));
isZero.link(this);
}
{
emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
- xorPtr(regT1, regT0);
+ xor64(regT1, regT0);
emitFastArithReTagImmediate(regT0, regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
{
emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
- orPtr(regT1, regT0);
+ or64(regT1, regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
void JIT::emit_op_throw(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_throw);
- stubCall.addArgument(currentInstruction[1].u.operand, regT2);
- stubCall.call();
- ASSERT(regT0 == returnValueRegister);
-#ifndef NDEBUG
- // cti_op_throw always changes it's return address,
- // this point in the code should never be reached.
- breakpoint();
-#endif
+ ASSERT(regT0 == returnValueGPR);
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+ callOperationNoExceptionCheck(operationThrow, regT0);
+ jumpToExceptionHandler();
}
void JIT::emit_op_get_pnames(Instruction* currentInstruction)
emitGetVirtualRegister(base, regT0);
if (!m_codeBlock->isKnownNotImmediate(base))
isNotObject.append(emitJumpIfNotJSCell(regT0));
- if (base != m_codeBlock->thisRegister() || m_codeBlock->isStrictMode()) {
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- isNotObject.append(branch8(NotEqual, Address(regT2, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType)));
- }
+ if (base != m_codeBlock->thisRegister().offset() || m_codeBlock->isStrictMode())
+ isNotObject.append(emitJumpIfCellNotObject(regT0));
// We could inline the case where you have a valid cache, but
// this call doesn't seem to be hot.
Label isObject(this);
- JITStubCall getPnamesStubCall(this, cti_op_get_pnames);
- getPnamesStubCall.addArgument(regT0);
- getPnamesStubCall.call(dst);
+ callOperation(operationGetPNames, regT0);
+ emitStoreCell(dst, returnValueGPR);
load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3);
- storePtr(tagTypeNumberRegister, payloadFor(i));
+ store64(tagTypeNumberRegister, addressFor(i));
store32(TrustedImm32(Int32Tag), intTagFor(size));
store32(regT3, intPayloadFor(size));
Jump end = jump();
move(regT0, regT1);
and32(TrustedImm32(~TagBitUndefined), regT1);
addJump(branch32(Equal, regT1, TrustedImm32(ValueNull)), breakTarget);
-
- JITStubCall toObjectStubCall(this, cti_to_object);
- toObjectStubCall.addArgument(regT0);
- toObjectStubCall.call(base);
+ callOperation(operationToObject, base, regT0);
jump().linkTo(isObject, this);
end.link(this);
loadPtr(addressFor(it), regT1);
loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2);
- loadPtr(BaseIndex(regT2, regT0, TimesEight), regT2);
+ load64(BaseIndex(regT2, regT0, TimesEight), regT2);
emitPutVirtualRegister(dst, regT2);
emitGetVirtualRegister(base, regT0);
// Test base's structure
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ emitLoadStructure(regT0, regT2, regT3);
callHasProperty.append(branchPtr(NotEqual, regT2, Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure)))));
// Test base's prototype chain
addJump(branchTestPtr(Zero, Address(regT3)), target);
Label checkPrototype(this);
- loadPtr(Address(regT2, Structure::prototypeOffset()), regT2);
+ load64(Address(regT2, Structure::prototypeOffset()), regT2);
callHasProperty.append(emitJumpIfNotJSCell(regT2));
- loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
+ emitLoadStructure(regT2, regT2, regT1);
callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3)));
addPtr(TrustedImm32(sizeof(Structure*)), regT3);
branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this);
// Slow case: Ask the object if i is valid.
callHasProperty.link(this);
emitGetVirtualRegister(dst, regT1);
- JITStubCall stubCall(this, cti_has_property);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call();
+ callOperation(operationHasProperty, regT0, regT1);
// Test for valid key.
addJump(branchTest32(NonZero, regT0), target);
end.link(this);
}
-void JIT::emit_op_push_scope(Instruction* currentInstruction)
+void JIT::emit_op_push_with_scope(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_push_scope);
- stubCall.addArgument(currentInstruction[1].u.operand, regT2);
- stubCall.call(currentInstruction[1].u.operand);
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+ callOperation(operationPushWithScope, regT0);
}
void JIT::emit_op_pop_scope(Instruction*)
{
- JITStubCall(this, cti_op_pop_scope).call();
+ callOperation(operationPopScope);
}
void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
- unsigned src2 = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int src1 = currentInstruction[2].u.operand;
+ int src2 = currentInstruction[3].u.operand;
emitGetVirtualRegisters(src1, regT0, src2, regT1);
-
- // Jump to a slow case if either operand is a number, or if both are JSCell*s.
+
+ // Jump slow if both are cells (to cover strings).
move(regT0, regT2);
- orPtr(regT1, regT2);
+ or64(regT1, regT2);
addSlowCase(emitJumpIfJSCell(regT2));
- addSlowCase(emitJumpIfImmediateNumber(regT2));
+
+ // Jump slow if either is a double. First test if it's an integer, which is fine, and then test
+ // if it's a double.
+ Jump leftOK = emitJumpIfImmediateInteger(regT0);
+ addSlowCase(emitJumpIfImmediateNumber(regT0));
+ leftOK.link(this);
+ Jump rightOK = emitJumpIfImmediateInteger(regT1);
+ addSlowCase(emitJumpIfImmediateNumber(regT1));
+ rightOK.link(this);
if (type == OpStrictEq)
- compare32(Equal, regT1, regT0, regT0);
+ compare64(Equal, regT1, regT0, regT0);
else
- compare32(NotEqual, regT1, regT0, regT0);
+ compare64(NotEqual, regT1, regT0, regT0);
emitTagAsBoolImmediate(regT0);
emitPutVirtualRegister(dst);
compileOpStrictEq(currentInstruction, OpNStrictEq);
}
-void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
+void JIT::emit_op_to_number(Instruction* currentInstruction)
{
int srcVReg = currentInstruction[2].u.operand;
emitGetVirtualRegister(srcVReg, regT0);
- Jump wasImmediate = emitJumpIfImmediateInteger(regT0);
-
- emitJumpSlowCaseIfNotJSCell(regT0, srcVReg);
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- addSlowCase(branch8(NotEqual, Address(regT2, Structure::typeInfoTypeOffset()), TrustedImm32(NumberType)));
-
- wasImmediate.link(this);
+ addSlowCase(emitJumpIfNotImmediateNumber(regT0));
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
-void JIT::emit_op_push_new_scope(Instruction* currentInstruction)
+void JIT::emit_op_push_name_scope(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_push_new_scope);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.addArgument(currentInstruction[3].u.operand, regT2);
- stubCall.call(currentInstruction[1].u.operand);
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
+ callOperation(operationPushNameScope, &m_codeBlock->identifier(currentInstruction[1].u.operand), regT0, currentInstruction[3].u.operand);
}
void JIT::emit_op_catch(Instruction* currentInstruction)
{
- killLastResultRegister(); // FIXME: Implicitly treat op_catch as a labeled statement, and remove this line of code.
- move(regT0, callFrameRegister);
- peek(regT3, OBJECT_OFFSETOF(struct JITStackFrame, globalData) / sizeof(void*));
- loadPtr(Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception)), regT0);
- storePtr(TrustedImmPtr(JSValue::encode(JSValue())), Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception)));
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-}
+ // Gotta restore the tag registers. We could be throwing from FTL, which may
+ // clobber them.
+ move(TrustedImm64(TagTypeNumber), tagTypeNumberRegister);
+ move(TrustedImm64(TagMask), tagMaskRegister);
+
+ move(TrustedImmPtr(m_vm), regT3);
+ load64(Address(regT3, VM::callFrameForThrowOffset()), callFrameRegister);
-void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_jmp_scopes);
- stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
- stubCall.call();
- addJump(jump(), currentInstruction[2].u.operand);
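+ // Re-establish this frame's stack pointer, then load and clear the VM's pending exception.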
+ addPtr(TrustedImm32(stackPointerOffsetFor(codeBlock()) * sizeof(Register)), callFrameRegister, stackPointerRegister);
+
+ load64(Address(regT3, VM::exceptionOffset()), regT0);
+ store64(TrustedImm64(JSValue::encode(JSValue())), Address(regT3, VM::exceptionOffset()));
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
}
void JIT::emit_op_switch_imm(Instruction* currentInstruction)
{
- unsigned tableIndex = currentInstruction[1].u.operand;
+ size_t tableIndex = currentInstruction[1].u.operand;
unsigned defaultOffset = currentInstruction[2].u.operand;
unsigned scrutinee = currentInstruction[3].u.operand;
// create jump table for switch destinations, track this switch statement.
- SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
+ SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex);
m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Immediate));
- jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+ jumpTable->ensureCTITable();
- JITStubCall stubCall(this, cti_op_switch_imm);
- stubCall.addArgument(scrutinee, regT2);
- stubCall.addArgument(Imm32(tableIndex));
- stubCall.call();
- jump(regT0);
+ emitGetVirtualRegister(scrutinee, regT0);
+ callOperation(operationSwitchImmWithUnknownKeyType, regT0, tableIndex);
+ jump(returnValueGPR);
}
void JIT::emit_op_switch_char(Instruction* currentInstruction)
{
- unsigned tableIndex = currentInstruction[1].u.operand;
+ size_t tableIndex = currentInstruction[1].u.operand;
unsigned defaultOffset = currentInstruction[2].u.operand;
unsigned scrutinee = currentInstruction[3].u.operand;
// create jump table for switch destinations, track this switch statement.
- SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
+ SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex);
m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Character));
- jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+ jumpTable->ensureCTITable();
- JITStubCall stubCall(this, cti_op_switch_char);
- stubCall.addArgument(scrutinee, regT2);
- stubCall.addArgument(Imm32(tableIndex));
- stubCall.call();
- jump(regT0);
+ emitGetVirtualRegister(scrutinee, regT0);
+ callOperation(operationSwitchCharWithUnknownKeyType, regT0, tableIndex);
+ jump(returnValueGPR);
}
void JIT::emit_op_switch_string(Instruction* currentInstruction)
{
- unsigned tableIndex = currentInstruction[1].u.operand;
+ size_t tableIndex = currentInstruction[1].u.operand;
unsigned defaultOffset = currentInstruction[2].u.operand;
unsigned scrutinee = currentInstruction[3].u.operand;
StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset));
- JITStubCall stubCall(this, cti_op_switch_string);
- stubCall.addArgument(scrutinee, regT2);
- stubCall.addArgument(Imm32(tableIndex));
- stubCall.call();
- jump(regT0);
+ emitGetVirtualRegister(scrutinee, regT0);
+ callOperation(operationSwitchStringWithUnknownKeyType, regT0, tableIndex);
+ jump(returnValueGPR);
}
-void JIT::emit_op_throw_reference_error(Instruction* currentInstruction)
+void JIT::emit_op_throw_static_error(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_throw_reference_error);
- stubCall.addArgument(ImmPtr(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand))));
- stubCall.call();
+ move(TrustedImm64(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand))), regT0);
+ callOperation(operationThrowStaticError, regT0, currentInstruction[2].u.operand);
}
void JIT::emit_op_debug(Instruction* currentInstruction)
{
-#if ENABLE(DEBUG_WITH_BREAKPOINT)
- UNUSED_PARAM(currentInstruction);
- breakpoint();
-#else
- JITStubCall stubCall(this, cti_op_debug);
- stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
- stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
- stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
- stubCall.call();
-#endif
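+ // Skip the operationDebug call entirely when the CodeBlock has no pending debugger requests.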
+ load32(codeBlock()->debuggerRequestsAddress(), regT0);
+ Jump noDebuggerRequests = branchTest32(Zero, regT0);
+ callOperation(operationDebug, currentInstruction[1].u.operand);
+ noDebuggerRequests.link(this);
}
void JIT::emit_op_eq_null(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int src1 = currentInstruction[2].u.operand;
emitGetVirtualRegister(src1, regT0);
Jump isImmediate = emitJumpIfNotJSCell(regT0);
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- test8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined), regT0);
+ Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ move(TrustedImm32(0), regT0);
+ Jump wasNotMasqueradesAsUndefined = jump();
+ isMasqueradesAsUndefined.link(this);
+ emitLoadStructure(regT0, regT2, regT1);
+ move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
+ loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
+ comparePtr(Equal, regT0, regT2, regT0);
Jump wasNotImmediate = jump();
isImmediate.link(this);
- andPtr(TrustedImm32(~TagBitUndefined), regT0);
- comparePtr(Equal, regT0, TrustedImm32(ValueNull), regT0);
+ and64(TrustedImm32(~TagBitUndefined), regT0);
+ compare64(Equal, regT0, TrustedImm32(ValueNull), regT0);
wasNotImmediate.link(this);
+ wasNotMasqueradesAsUndefined.link(this);
emitTagAsBoolImmediate(regT0);
emitPutVirtualRegister(dst);
void JIT::emit_op_neq_null(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int src1 = currentInstruction[2].u.operand;
emitGetVirtualRegister(src1, regT0);
Jump isImmediate = emitJumpIfNotJSCell(regT0);
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- test8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined), regT0);
+ Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ move(TrustedImm32(1), regT0);
+ Jump wasNotMasqueradesAsUndefined = jump();
+ isMasqueradesAsUndefined.link(this);
+ emitLoadStructure(regT0, regT2, regT1);
+ move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
+ loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
+ comparePtr(NotEqual, regT0, regT2, regT0);
Jump wasNotImmediate = jump();
isImmediate.link(this);
- andPtr(TrustedImm32(~TagBitUndefined), regT0);
- comparePtr(NotEqual, regT0, TrustedImm32(ValueNull), regT0);
+ and64(TrustedImm32(~TagBitUndefined), regT0);
+ compare64(NotEqual, regT0, TrustedImm32(ValueNull), regT0);
wasNotImmediate.link(this);
+ wasNotMasqueradesAsUndefined.link(this);
emitTagAsBoolImmediate(regT0);
emitPutVirtualRegister(dst);
// object lifetime and increasing GC pressure.
size_t count = m_codeBlock->m_numVars;
for (size_t j = 0; j < count; ++j)
- emitInitRegister(j);
+ emitInitRegister(virtualRegisterForLocal(j).offset());
+
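+ // Write-barrier the owner executable and plant the tier-up (optimization) check for this code block.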
+ emitWriteBarrier(m_codeBlock->ownerExecutable());
+ emitEnterOptimizationCheck();
}
void JIT::emit_op_create_activation(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
+ int dst = currentInstruction[1].u.operand;
- Jump activationCreated = branchTestPtr(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
- JITStubCall(this, cti_op_push_activation).call(currentInstruction[1].u.operand);
- emitPutVirtualRegister(dst);
+ Jump activationCreated = branchTest64(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
+ callOperation(operationCreateActivation, 0);
+ emitStoreCell(dst, returnValueGPR);
activationCreated.link(this);
}
void JIT::emit_op_create_arguments(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
+ int dst = currentInstruction[1].u.operand;
+
+ Jump argsCreated = branchTest64(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
+
+ callOperation(operationCreateArguments);
+ emitStoreCell(dst, returnValueGPR);
+ emitStoreCell(unmodifiedArgumentsRegister(VirtualRegister(dst)), returnValueGPR);
- Jump argsCreated = branchTestPtr(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
- if (m_codeBlock->m_numParameters == 1)
- JITStubCall(this, cti_op_create_arguments_no_params).call();
- else
- JITStubCall(this, cti_op_create_arguments).call();
- emitPutVirtualRegister(dst);
- emitPutVirtualRegister(unmodifiedArgumentsRegister(dst));
argsCreated.link(this);
}
void JIT::emit_op_init_lazy_reg(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
+ int dst = currentInstruction[1].u.operand;
- storePtr(TrustedImmPtr(0), Address(callFrameRegister, sizeof(Register) * dst));
+ store64(TrustedImm64((int64_t)0), Address(callFrameRegister, sizeof(Register) * dst));
}
-void JIT::emit_op_convert_this(Instruction* currentInstruction)
+void JIT::emit_op_to_this(Instruction* currentInstruction)
{
- emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+ WriteBarrierBase<Structure>* cachedStructure = &currentInstruction[2].u.structure;
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT1);
- emitJumpSlowCaseIfNotJSCell(regT0);
- loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
- addSlowCase(branchTest8(NonZero, Address(regT1, Structure::typeInfoFlagsOffset()), TrustedImm32(NeedsThisConversion)));
-}
+ emitJumpSlowCaseIfNotJSCell(regT1);
-void JIT::emit_op_convert_this_strict(Instruction* currentInstruction)
-{
- emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
- Jump notNull = branchTestPtr(NonZero, regT0);
- move(TrustedImmPtr(JSValue::encode(jsNull())), regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand, regT0);
- Jump setThis = jump();
- notNull.link(this);
- Jump isImmediate = emitJumpIfNotJSCell(regT0);
- loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
- Jump notAnObject = branch8(NotEqual, Address(regT1, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType));
- addSlowCase(branchTest8(NonZero, Address(regT1, Structure::typeInfoFlagsOffset()), TrustedImm32(NeedsThisConversion)));
- isImmediate.link(this);
- notAnObject.link(this);
- setThis.link(this);
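+ // Fast path: |this| must be a final object whose structure ID matches the structure
+ // cached in the instruction stream; anything else takes the slow path.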
+ addSlowCase(branch8(NotEqual, Address(regT1, JSCell::typeInfoTypeOffset()), TrustedImm32(FinalObjectType)));
+ loadPtr(cachedStructure, regT2);
+ addSlowCase(branchTestPtr(Zero, regT2));
+ load32(Address(regT2, Structure::structureIDOffset()), regT2);
+ addSlowCase(branch32(NotEqual, Address(regT1, JSCell::structureIDOffset()), regT2));
}
void JIT::emit_op_get_callee(Instruction* currentInstruction)
{
- unsigned result = currentInstruction[1].u.operand;
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT0);
+ int result = currentInstruction[1].u.operand;
+ WriteBarrierBase<JSCell>* cachedFunction = &currentInstruction[2].u.jsCell;
+ emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT0);
+
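+ // Guard against a callee that differs from the cell cached in the instruction.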
+ loadPtr(cachedFunction, regT2);
+ addSlowCase(branchPtr(NotEqual, regT0, regT2));
+
emitPutVirtualRegister(result);
}
+void JIT::emitSlow_op_get_callee(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_callee);
+ slowPathCall.call();
+}
+
void JIT::emit_op_create_this(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_create_this);
- stubCall.addArgument(currentInstruction[2].u.operand, regT1);
- stubCall.call(currentInstruction[1].u.operand);
+ int callee = currentInstruction[2].u.operand;
+ RegisterID calleeReg = regT0;
+ RegisterID resultReg = regT0;
+ RegisterID allocatorReg = regT1;
+ RegisterID structureReg = regT2;
+ RegisterID scratchReg = regT3;
+
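+ // Allocate the object inline from the callee's ObjectAllocationProfile; a missing
+ // allocator or a failed allocation falls through to the slow path.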
+ emitGetVirtualRegister(callee, calleeReg);
+ loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg);
+ loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg);
+ addSlowCase(branchTestPtr(Zero, allocatorReg));
+
+ emitAllocateJSObject(allocatorReg, structureReg, resultReg, scratchReg);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
}
-void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
+void JIT::emitSlow_op_create_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- peek(regT1, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof(void*));
- Jump noProfiler = branchTestPtr(Zero, Address(regT1));
+ linkSlowCase(iter); // doesn't have an allocation profile
+ linkSlowCase(iter); // allocation failed
- JITStubCall stubCall(this, cti_op_profile_will_call);
- stubCall.addArgument(currentInstruction[1].u.operand, regT1);
- stubCall.call();
- noProfiler.link(this);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_this);
+ slowPathCall.call();
+}
+void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
+{
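+ // Skip the callback entirely when no profiler is installed in the VM.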
+ Jump profilerDone = branchTestPtr(Zero, AbsoluteAddress(m_vm->enabledProfilerAddress()));
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+ callOperation(operationProfileWillCall, regT0);
+ profilerDone.link(this);
}
void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
{
- peek(regT1, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof(void*));
- Jump noProfiler = branchTestPtr(Zero, Address(regT1));
-
- JITStubCall stubCall(this, cti_op_profile_did_call);
- stubCall.addArgument(currentInstruction[1].u.operand, regT1);
- stubCall.call();
- noProfiler.link(this);
+ Jump profilerDone = branchTestPtr(Zero, AbsoluteAddress(m_vm->enabledProfilerAddress()));
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+ callOperation(operationProfileDidCall, regT0);
+ profilerDone.link(this);
}
// Slow cases
-void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_to_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_convert_this);
- stubCall.addArgument(regT0);
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emitSlow_op_convert_this_strict(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_convert_this_strict);
- stubCall.addArgument(regT0);
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_to_primitive);
- stubCall.addArgument(regT0);
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
- if (isOperandConstantImmediateInt(op2)) {
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_loop_if_lesseq);
- stubCall.addArgument(regT0);
- stubCall.addArgument(currentInstruction[2].u.operand, regT2);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
- } else {
- linkSlowCase(iter);
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_loop_if_lesseq);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
- }
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_this);
+ slowPathCall.call();
}
-void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned base = currentInstruction[1].u.operand;
- unsigned property = currentInstruction[2].u.operand;
- unsigned value = currentInstruction[3].u.operand;
-
- linkSlowCase(iter); // property int32 check
- linkSlowCaseIfNotJSCell(iter, base); // base cell check
- linkSlowCase(iter); // base not array check
- linkSlowCase(iter); // in vector check
+ linkSlowCase(iter);
- JITStubCall stubPutByValCall(this, cti_op_put_by_val);
- stubPutByValCall.addArgument(regT0);
- stubPutByValCall.addArgument(property, regT2);
- stubPutByValCall.addArgument(value, regT2);
- stubPutByValCall.call();
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_primitive);
+ slowPathCall.call();
}
void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
- JITStubCall stubCall(this, cti_op_not);
- stubCall.addArgument(regT0);
- stubCall.call(currentInstruction[1].u.operand);
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_not);
+ slowPathCall.call();
}
void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_jtrue);
- stubCall.addArgument(regT0);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), currentInstruction[2].u.operand); // inverted!
-}
-
-void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_bitnot);
- stubCall.addArgument(regT0);
- stubCall.call(currentInstruction[1].u.operand);
+ callOperation(operationConvertJSValueToBoolean, regT0);
+ emitJumpSlowToHot(branchTest32(Zero, returnValueGPR), currentInstruction[2].u.operand); // inverted!
}
void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_jtrue);
- stubCall.addArgument(regT0);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), currentInstruction[2].u.operand);
+ callOperation(operationConvertJSValueToBoolean, regT0);
+ emitJumpSlowToHot(branchTest32(NonZero, returnValueGPR), currentInstruction[2].u.operand);
}
void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_bitxor);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call(currentInstruction[1].u.operand);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitxor);
+ slowPathCall.call();
}
void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_bitor);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call(currentInstruction[1].u.operand);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitor);
+ slowPathCall.call();
}
void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_eq);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call();
- emitTagAsBoolImmediate(regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
+ callOperation(operationCompareEq, regT0, regT1);
+ emitTagAsBoolImmediate(returnValueGPR);
+ emitPutVirtualRegister(currentInstruction[1].u.operand, returnValueGPR);
}
void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_eq);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call();
+ callOperation(operationCompareEq, regT0, regT1);
xor32(TrustedImm32(0x1), regT0);
- emitTagAsBoolImmediate(regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
+ emitTagAsBoolImmediate(returnValueGPR);
+ emitPutVirtualRegister(currentInstruction[1].u.operand, returnValueGPR);
}
void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_stricteq);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call(currentInstruction[1].u.operand);
+ linkSlowCase(iter);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_stricteq);
+ slowPathCall.call();
}
void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_nstricteq);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call(currentInstruction[1].u.operand);
+ linkSlowCase(iter);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_nstricteq);
+ slowPathCall.call();
}
void JIT::emitSlow_op_check_has_instance(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned baseVal = currentInstruction[1].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
+ int baseVal = currentInstruction[3].u.operand;
linkSlowCaseIfNotJSCell(iter, baseVal);
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_check_has_instance);
- stubCall.addArgument(baseVal, regT2);
- stubCall.call();
+ emitGetVirtualRegister(value, regT0);
+ emitGetVirtualRegister(baseVal, regT1);
+ callOperation(operationCheckHasInstance, dst, regT0, regT1);
+
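+ // The operation handles non-default hasInstance (and throws for invalid bases),
+ // so jump over the generic op_instanceof that follows.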
+ emitJumpSlowToHot(jump(), currentInstruction[4].u.operand);
}
void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned value = currentInstruction[2].u.operand;
- unsigned baseVal = currentInstruction[3].u.operand;
- unsigned proto = currentInstruction[4].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
+ int proto = currentInstruction[3].u.operand;
linkSlowCaseIfNotJSCell(iter, value);
linkSlowCaseIfNotJSCell(iter, proto);
linkSlowCase(iter);
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_instanceof);
- stubCall.addArgument(value, regT2);
- stubCall.addArgument(baseVal, regT2);
- stubCall.addArgument(proto, regT2);
- stubCall.call(dst);
-}
-
-void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call);
-}
-
-void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call_eval);
-}
-
-void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallVarargsSlowCase(currentInstruction, iter);
-}
-
-void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_construct);
+ emitGetVirtualRegister(value, regT0);
+ emitGetVirtualRegister(proto, regT1);
+ callOperation(operationInstanceOf, dst, regT0, regT1);
}
-void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_to_number(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- linkSlowCaseIfNotJSCell(iter, currentInstruction[2].u.operand);
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_to_jsnumber);
- stubCall.addArgument(regT0);
- stubCall.call(currentInstruction[1].u.operand);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_number);
+ slowPathCall.call();
}
void JIT::emit_op_get_arguments_length(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
int argumentsRegister = currentInstruction[2].u.operand;
- addSlowCase(branchTestPtr(NonZero, addressFor(argumentsRegister)));
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
+ addSlowCase(branchTest64(NonZero, addressFor(argumentsRegister)));
+ emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0);
sub32(TrustedImm32(1), regT0);
emitFastArithReTagImmediate(regT0, regT0);
emitPutVirtualRegister(dst, regT0);
void JIT::emitSlow_op_get_arguments_length(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
-
- emitGetVirtualRegister(base, regT0);
- JITStubCall stubCall(this, cti_op_get_by_id_generic);
- stubCall.addArgument(regT0);
- stubCall.addArgument(TrustedImmPtr(ident));
- stubCall.call(dst);
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ callOperation(operationGetArgumentsLength, dst, base);
}
void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
int dst = currentInstruction[1].u.operand;
int argumentsRegister = currentInstruction[2].u.operand;
int property = currentInstruction[3].u.operand;
- addSlowCase(branchTestPtr(NonZero, addressFor(argumentsRegister)));
+ addSlowCase(branchTest64(NonZero, addressFor(argumentsRegister)));
emitGetVirtualRegister(property, regT1);
addSlowCase(emitJumpIfNotImmediateInteger(regT1));
add32(TrustedImm32(1), regT1);
// regT1 now contains the integer index of the argument we want, including this
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT2);
+ emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT2);
addSlowCase(branch32(AboveOrEqual, regT1, regT2));
-
- Jump skipOutofLineParams;
- int numArgs = m_codeBlock->m_numParameters;
- if (numArgs) {
- Jump notInInPlaceArgs = branch32(AboveOrEqual, regT1, Imm32(numArgs));
- addPtr(Imm32(static_cast<unsigned>(-(RegisterFile::CallFrameHeaderSize + numArgs) * sizeof(Register))), callFrameRegister, regT0);
- loadPtr(BaseIndex(regT0, regT1, TimesEight, 0), regT0);
- skipOutofLineParams = jump();
- notInInPlaceArgs.link(this);
- }
-
- addPtr(Imm32(static_cast<unsigned>(-(RegisterFile::CallFrameHeaderSize + numArgs) * sizeof(Register))), callFrameRegister, regT0);
- mul32(TrustedImm32(sizeof(Register)), regT2, regT2);
- subPtr(regT2, regT0);
- loadPtr(BaseIndex(regT0, regT1, TimesEight, 0), regT0);
- if (numArgs)
- skipOutofLineParams.link(this);
+
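+ // The index has been bounds-checked against ArgumentCount, so the argument can be
+ // loaded straight off the call frame relative to the |this| slot.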
+ signExtend32ToPtr(regT1, regT1);
+ load64(BaseIndex(callFrameRegister, regT1, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT0);
+ emitValueProfilingSite();
emitPutVirtualRegister(dst, regT0);
}
void JIT::emitSlow_op_get_argument_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned arguments = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int arguments = currentInstruction[2].u.operand;
+ int property = currentInstruction[3].u.operand;
linkSlowCase(iter);
Jump skipArgumentsCreation = jump();
linkSlowCase(iter);
linkSlowCase(iter);
- if (m_codeBlock->m_numParameters == 1)
- JITStubCall(this, cti_op_create_arguments_no_params).call();
- else
- JITStubCall(this, cti_op_create_arguments).call();
- emitPutVirtualRegister(arguments);
- emitPutVirtualRegister(unmodifiedArgumentsRegister(arguments));
+ callOperation(operationCreateArguments);
+ emitStoreCell(arguments, returnValueGPR);
+ emitStoreCell(unmodifiedArgumentsRegister(VirtualRegister(arguments)), returnValueGPR);
skipArgumentsCreation.link(this);
- JITStubCall stubCall(this, cti_op_get_by_val);
- stubCall.addArgument(arguments, regT2);
- stubCall.addArgument(property, regT2);
- stubCall.call(dst);
+ emitGetVirtualRegister(arguments, regT0);
+ emitGetVirtualRegister(property, regT1);
+ callOperation(WithProfile, operationGetByValGeneric, dst, regT0, regT1);
}
#endif // USE(JSVALUE64)
-void JIT::emit_op_resolve_global_dynamic(Instruction* currentInstruction)
+void JIT::emit_op_touch_entry(Instruction* currentInstruction)
{
- int skip = currentInstruction[5].u.operand;
-
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0);
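+ // Once the function-entry watchpoint has been invalidated there is nothing left to
+ // notify, so the slow path call can be skipped entirely.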
+ if (m_codeBlock->symbolTable()->m_functionEnteredOnce.hasBeenInvalidated())
+ return;
- bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
- ASSERT(skip || !checkTopLevel);
- if (checkTopLevel && skip--) {
- Jump activationNotCreated;
- if (checkTopLevel)
- activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, object)), regT1);
- addSlowCase(checkStructure(regT1, m_globalData->activationStructure.get()));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
- activationNotCreated.link(this);
- }
- while (skip--) {
- loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, object)), regT1);
- addSlowCase(checkStructure(regT1, m_globalData->activationStructure.get()));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
- }
- emit_op_resolve_global(currentInstruction, true);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_touch_entry);
+ slowPathCall.call();
}
-void JIT::emitSlow_op_resolve_global_dynamic(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_op_loop_hint(Instruction*)
{
- unsigned dst = currentInstruction[1].u.operand;
- Identifier* ident = &m_codeBlock->identifier(currentInstruction[2].u.operand);
- int skip = currentInstruction[5].u.operand;
- while (skip--)
- linkSlowCase(iter);
- JITStubCall resolveStubCall(this, cti_op_resolve);
- resolveStubCall.addArgument(TrustedImmPtr(ident));
- resolveStubCall.call(dst);
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_resolve_global_dynamic));
-
- unsigned currentIndex = m_globalResolveInfoIndex++;
-
- linkSlowCase(iter); // We managed to skip all the nodes in the scope chain, but the cache missed.
- JITStubCall stubCall(this, cti_op_resolve_global);
- stubCall.addArgument(TrustedImmPtr(ident));
- stubCall.addArgument(Imm32(currentIndex));
- stubCall.addArgument(regT0);
- stubCall.call(dst);
-}
+ // Emit the JIT optimization check:
+ if (canBeOptimized()) {
+ addSlowCase(branchAdd32(PositiveOrZero, TrustedImm32(Options::executionCounterIncrementForLoop()),
+ AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
+ }
-void JIT::emit_op_new_regexp(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_new_regexp);
- stubCall.addArgument(TrustedImmPtr(m_codeBlock->regexp(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
+ // Emit the watchdog timer check:
+ if (m_vm->watchdog && m_vm->watchdog->isEnabled())
+ addSlowCase(branchTest8(NonZero, AbsoluteAddress(m_vm->watchdog->timerDidFireAddress())));
}
-void JIT::emit_op_load_varargs(Instruction* currentInstruction)
+void JIT::emitSlow_op_loop_hint(Instruction*, Vector<SlowCaseEntry>::iterator& iter)
{
- int argCountDst = currentInstruction[1].u.operand;
- int argsOffset = currentInstruction[2].u.operand;
- int registerOffset = currentInstruction[3].u.operand;
- ASSERT(argsOffset <= registerOffset);
-
- int expectedParams = m_codeBlock->m_numParameters - 1;
- // Don't do inline copying if we aren't guaranteed to have a single stream
- // of arguments
- if (expectedParams) {
- JITStubCall stubCall(this, cti_op_load_varargs);
- stubCall.addArgument(Imm32(argsOffset));
- stubCall.call();
- // Stores a naked int32 in the register file.
- store32(returnValueRegister, Address(callFrameRegister, argCountDst * sizeof(Register)));
- return;
+#if ENABLE(DFG_JIT)
+ // Emit the slow path for the JIT optimization check:
+ if (canBeOptimized()) {
+ linkSlowCase(iter);
+
+ callOperation(operationOptimize, m_bytecodeOffset);
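+ // operationOptimize returns the entry point of optimized code, or null if we should
+ // keep running in the baseline JIT.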
+ Jump noOptimizedEntry = branchTestPtr(Zero, returnValueGPR);
+ if (!ASSERT_DISABLED) {
+ Jump ok = branchPtr(MacroAssembler::Above, regT0, TrustedImmPtr(bitwise_cast<void*>(static_cast<intptr_t>(1000))));
+ abortWithReason(JITUnreasonableLoopHintJumpTarget);
+ ok.link(this);
+ }
+ jump(returnValueGPR);
+ noOptimizedEntry.link(this);
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_loop_hint));
}
-
-#if USE(JSVALUE32_64)
- addSlowCase(branch32(NotEqual, tagFor(argsOffset), TrustedImm32(JSValue::EmptyValueTag)));
-#else
- addSlowCase(branchTestPtr(NonZero, addressFor(argsOffset)));
#endif
- // Load arg count into regT0
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
- store32(TrustedImm32(Int32Tag), intTagFor(argCountDst));
- store32(regT0, intPayloadFor(argCountDst));
- Jump endBranch = branch32(Equal, regT0, TrustedImm32(1));
-
- mul32(TrustedImm32(sizeof(Register)), regT0, regT3);
- addPtr(TrustedImm32(static_cast<unsigned>(sizeof(Register) - RegisterFile::CallFrameHeaderSize * sizeof(Register))), callFrameRegister, regT1);
- subPtr(regT3, regT1); // regT1 is now the start of the out of line arguments
- addPtr(Imm32(argsOffset * sizeof(Register)), callFrameRegister, regT2); // regT2 is the target buffer
-
- // Bounds check the registerfile
- addPtr(regT2, regT3);
- addPtr(Imm32((registerOffset - argsOffset) * sizeof(Register)), regT3);
- addSlowCase(branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), regT3));
- sub32(TrustedImm32(1), regT0);
- Label loopStart = label();
- loadPtr(BaseIndex(regT1, regT0, TimesEight, static_cast<unsigned>(0 - 2 * sizeof(Register))), regT3);
- storePtr(regT3, BaseIndex(regT2, regT0, TimesEight, static_cast<unsigned>(0 - sizeof(Register))));
-#if USE(JSVALUE32_64)
- loadPtr(BaseIndex(regT1, regT0, TimesEight, static_cast<unsigned>(sizeof(void*) - 2 * sizeof(Register))), regT3);
- storePtr(regT3, BaseIndex(regT2, regT0, TimesEight, static_cast<unsigned>(sizeof(void*) - sizeof(Register))));
-#endif
- branchSubPtr(NonZero, TrustedImm32(1), regT0).linkTo(loopStart, this);
- endBranch.link(this);
+ // Emit the slow path of the watchdog timer check:
+ if (m_vm->watchdog && m_vm->watchdog->isEnabled()) {
+ linkSlowCase(iter);
+ callOperation(operationHandleWatchdogTimer);
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_loop_hint));
+ }
+
}
-void JIT::emitSlow_op_load_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_op_new_regexp(Instruction* currentInstruction)
{
- int argCountDst = currentInstruction[1].u.operand;
- int argsOffset = currentInstruction[2].u.operand;
- int expectedParams = m_codeBlock->m_numParameters - 1;
- if (expectedParams)
- return;
-
- linkSlowCase(iter);
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_load_varargs);
- stubCall.addArgument(Imm32(argsOffset));
- stubCall.call();
-
- store32(TrustedImm32(Int32Tag), intTagFor(argCountDst));
- store32(returnValueRegister, intPayloadFor(argCountDst));
+ callOperation(operationNewRegexp, currentInstruction[1].u.operand, m_codeBlock->regexp(currentInstruction[2].u.operand));
}
void JIT::emit_op_new_func(Instruction* currentInstruction)
#if USE(JSVALUE32_64)
lazyJump = branch32(NotEqual, tagFor(dst), TrustedImm32(JSValue::EmptyValueTag));
#else
- lazyJump = branchTestPtr(NonZero, addressFor(dst));
+ lazyJump = branchTest64(NonZero, addressFor(dst));
#endif
}
- JITStubCall stubCall(this, cti_op_new_func);
- stubCall.addArgument(TrustedImmPtr(m_codeBlock->functionDecl(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
+
+ FunctionExecutable* funcExec = m_codeBlock->functionDecl(currentInstruction[2].u.operand);
+ callOperation(operationNewFunction, dst, funcExec);
+
if (currentInstruction[3].u.operand)
lazyJump.link(this);
}
+void JIT::emit_op_new_captured_func(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_new_captured_func);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ FunctionExecutable* funcExpr = m_codeBlock->functionExpr(currentInstruction[2].u.operand);
+ callOperation(operationNewFunction, dst, funcExpr);
+}
+
void JIT::emit_op_new_array(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_new_array);
- stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
- stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
- stubCall.call(currentInstruction[1].u.operand);
+ int dst = currentInstruction[1].u.operand;
+ int valuesIndex = currentInstruction[2].u.operand;
+ int size = currentInstruction[3].u.operand;
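+ // The elements live in a contiguous run of registers starting at valuesIndex; pass
+ // their address on the frame together with the allocation profile.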
+ addPtr(TrustedImm32(valuesIndex * sizeof(Register)), callFrameRegister, regT0);
+ callOperation(operationNewArrayWithProfile, dst,
+ currentInstruction[4].u.arrayAllocationProfile, regT0, size);
+}
+
+void JIT::emit_op_new_array_with_size(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int sizeIndex = currentInstruction[2].u.operand;
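+ // The size operand is a JSValue: one register on 64-bit, a tag/payload pair on 32-bit.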
+#if USE(JSVALUE64)
+ emitGetVirtualRegister(sizeIndex, regT0);
+ callOperation(operationNewArrayWithSizeAndProfile, dst,
+ currentInstruction[3].u.arrayAllocationProfile, regT0);
+#else
+ emitLoad(sizeIndex, regT1, regT0);
+ callOperation(operationNewArrayWithSizeAndProfile, dst,
+ currentInstruction[3].u.arrayAllocationProfile, regT1, regT0);
+#endif
}
void JIT::emit_op_new_array_buffer(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_new_array_buffer);
- stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
- stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
- stubCall.call(currentInstruction[1].u.operand);
+ int dst = currentInstruction[1].u.operand;
+ int valuesIndex = currentInstruction[2].u.operand;
+ int size = currentInstruction[3].u.operand;
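+ // The literal elements were emitted into the code block's constant buffer.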
+ const JSValue* values = codeBlock()->constantBuffer(valuesIndex);
+ callOperation(operationNewArrayBufferWithProfile, dst, currentInstruction[4].u.arrayAllocationProfile, values, size);
+}
+
+void JIT::emitSlow_op_captured_mov(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ VariableWatchpointSet* set = currentInstruction[3].u.watchpointSet;
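+ // Mirror the fast path: slow cases are only emitted while the variable's watchpoint
+ // set can still be invalidated, so bail out under the same condition here.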
+ if (!set || set->state() == IsInvalidated)
+ return;
+#if USE(JSVALUE32_64)
+ linkSlowCase(iter);
+#endif
+ linkSlowCase(iter);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_captured_mov);
+ slowPathCall.call();
}
} // namespace JSC