X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/blobdiff_plain/14957cd040308e3eeec43d26bae5d76da13fcd85..HEAD:/jit/JITOpcodes32_64.cpp

diff --git a/jit/JITOpcodes32_64.cpp b/jit/JITOpcodes32_64.cpp
index 4f4c5c0..8764cc2 100644
--- a/jit/JITOpcodes32_64.cpp
+++ b/jit/JITOpcodes32_64.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2012, 2013, 2014 Apple Inc. All rights reserved.
  * Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
  *
  * Redistribution and use in source and binary forms, with or without
@@ -30,469 +30,126 @@
 #if USE(JSVALUE32_64)
 #include "JIT.h"
 
-#include "JITInlineMethods.h"
-#include "JITStubCall.h"
+#include "CCallHelpers.h"
+#include "Debugger.h"
+#include "Exception.h"
+#include "JITInlines.h"
 #include "JSArray.h"
 #include "JSCell.h"
+#include "JSEnvironmentRecord.h"
 #include "JSFunction.h"
-#include "JSPropertyNameIterator.h"
+#include "JSNameScope.h"
+#include "JSPropertyNameEnumerator.h"
 #include "LinkBuffer.h"
+#include "MaxFrameExtentForSlowPathCall.h"
+#include "RepatchBuffer.h"
+#include "SlowPathCall.h"
+#include "TypeProfilerLog.h"
+#include "VirtualRegister.h"
 
 namespace JSC {
 
-void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, TrampolineStructure *trampolines)
-{
-#if ENABLE(JIT_USE_SOFT_MODULO)
-    Label softModBegin = align();
-    softModulo();
-#endif
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-    // (1) This function provides fast property access for string length
-    Label stringLengthBegin = align();
-
-    // regT0 holds payload, regT1 holds tag
-
-    Jump string_failureCases1 = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
-    Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr));
-
-    // Checks out okay! - get the length from the Ustring.
-    load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_length)), regT2);
-
-    Jump string_failureCases3 = branch32(Above, regT2, TrustedImm32(INT_MAX));
-    move(regT2, regT0);
-    move(TrustedImm32(JSValue::Int32Tag), regT1);
-
-    ret();
-#endif
-
-    JumpList callLinkFailures;
-    // (2) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
-#if ENABLE(JIT_OPTIMIZE_CALL)
-    // VirtualCallLink Trampoline
-    // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
-    Label virtualCallLinkBegin = align();
-    compileOpCallInitializeCallFrame();
-    preserveReturnAddressAfterCall(regT3);
-    emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
-    restoreArgumentReference();
-    Call callLazyLinkCall = call();
-    callLinkFailures.append(branchTestPtr(Zero, regT0));
-    restoreReturnAddressBeforeReturn(regT3);
-    emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
-    jump(regT0);
-
-    // VirtualConstructLink Trampoline
-    // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
-    Label virtualConstructLinkBegin = align();
-    compileOpCallInitializeCallFrame();
-    preserveReturnAddressAfterCall(regT3);
-    emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
-    restoreArgumentReference();
-    Call callLazyLinkConstruct = call();
-    restoreReturnAddressBeforeReturn(regT3);
-    callLinkFailures.append(branchTestPtr(Zero, regT0));
-    emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
-    jump(regT0);
-
-#endif // ENABLE(JIT_OPTIMIZE_CALL)
-
-    // VirtualCall Trampoline
-    // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
-    Label virtualCallBegin = align();
-    compileOpCallInitializeCallFrame();
-
-    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
-
-    Jump hasCodeBlock3 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), TrustedImm32(0));
-    preserveReturnAddressAfterCall(regT3);
-    restoreArgumentReference();
-    Call callCompileCall = call();
-    callLinkFailures.append(branchTestPtr(Zero, regT0));
-    emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
-    restoreReturnAddressBeforeReturn(regT3);
-    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
-    hasCodeBlock3.link(this);
-
-    loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForCallWithArityCheck)), regT0);
-    jump(regT0);
-
-    // VirtualConstruct Trampoline
-    // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
-    Label virtualConstructBegin = align();
-    compileOpCallInitializeCallFrame();
-
-    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
-
-    Jump hasCodeBlock4 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), TrustedImm32(0));
-    preserveReturnAddressAfterCall(regT3);
-    restoreArgumentReference();
-    Call callCompileCconstruct = call();
-    callLinkFailures.append(branchTestPtr(Zero, regT0));
-    emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
-    restoreReturnAddressBeforeReturn(regT3);
-    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
-    hasCodeBlock4.link(this);
-
-    loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForConstructWithArityCheck)), regT0);
-    jump(regT0);
-
-    // If the parser fails we want to be able to keep going,
-    // so we handle this as a parse failure.
-    callLinkFailures.link(this);
-    emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
-    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
-    restoreReturnAddressBeforeReturn(regT1);
-    move(TrustedImmPtr(&globalData->exceptionLocation), regT2);
-    storePtr(regT1, regT2);
-    poke(callFrameRegister, 1 + OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
-    poke(TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()));
-    ret();
-
-    // NativeCall Trampoline
-    Label nativeCallThunk = privateCompileCTINativeCall(globalData);
-    Label nativeConstructThunk = privateCompileCTINativeCall(globalData, true);
-
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-    Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
-    Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
-    Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
-#endif
-
-    // All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
-    LinkBuffer patchBuffer(*m_globalData, this, m_globalData->executableAllocator);
-
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-    patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
-    patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
-    patchBuffer.link(string_failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
-#endif
-#if ENABLE(JIT_OPTIMIZE_CALL)
-    patchBuffer.link(callLazyLinkCall, FunctionPtr(cti_vm_lazyLinkCall));
-    patchBuffer.link(callLazyLinkConstruct, FunctionPtr(cti_vm_lazyLinkConstruct));
-#endif
-    patchBuffer.link(callCompileCall, FunctionPtr(cti_op_call_jitCompile));
-    patchBuffer.link(callCompileCconstruct, FunctionPtr(cti_op_construct_jitCompile));
-
-    CodeRef finalCode = patchBuffer.finalizeCode();
-    *executablePool = finalCode.m_executablePool;
-
-    trampolines->ctiVirtualCall = patchBuffer.trampolineAt(virtualCallBegin);
-    trampolines->ctiVirtualConstruct = patchBuffer.trampolineAt(virtualConstructBegin);
-#if ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
-    trampolines->ctiNativeCall = patchBuffer.trampolineAt(nativeCallThunk);
-    trampolines->ctiNativeConstruct = patchBuffer.trampolineAt(nativeConstructThunk);
-#endif
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-    trampolines->ctiStringLengthTrampoline = patchBuffer.trampolineAt(stringLengthBegin);
-#endif
-#if ENABLE(JIT_OPTIMIZE_CALL)
-    trampolines->ctiVirtualCallLink = patchBuffer.trampolineAt(virtualCallLinkBegin);
-    trampolines->ctiVirtualConstructLink = patchBuffer.trampolineAt(virtualConstructLinkBegin);
-#endif
-#if ENABLE(JIT_USE_SOFT_MODULO)
-    trampolines->ctiSoftModulo = patchBuffer.trampolineAt(softModBegin);
-#endif
-}
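
[Editor's note — illustration, not part of the diff.] The deleted trampoline builder above follows the MacroAssembler's two-phase pattern: emit code while recording unresolved Call and Label sites, then resolve them all through a LinkBuffer once the buffer's final address is known. A minimal self-contained C++ sketch of that pattern; all types below are hypothetical stand-ins, not JSC's real classes:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    struct CallSite  { size_t offset; }; // where a call was emitted, like JSC's Call
    struct LabelSite { size_t offset; }; // a position in the buffer, like JSC's Label

    struct Emitter {
        std::vector<uint8_t> code;
        CallSite call() {
            CallSite site{code.size()};
            // x86 "call rel32" with the displacement left unresolved.
            code.insert(code.end(), {0xE8, 0, 0, 0, 0});
            return site;
        }
        LabelSite align() { return LabelSite{code.size()}; }
    };

    struct PatchBuffer { // plays the role of LinkBuffer
        Emitter& emitter;
        void link(CallSite site, void (*target)()) {
            // Only now can the real displacement be computed.
            int32_t rel = int32_t(uintptr_t(target)
                - (uintptr_t(emitter.code.data()) + site.offset + 5));
            std::memcpy(&emitter.code[site.offset + 1], &rel, sizeof(rel));
        }
        const uint8_t* trampolineAt(LabelSite label) {
            return emitter.code.data() + label.offset;
        }
    };

Usage mirrors the deleted code: emit everything, construct one PatchBuffer, call link() for each recorded call, then hand out trampolineAt() pointers.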
-
-JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isConstruct)
-{
-    int executableOffsetToFunction = isConstruct ? OBJECT_OFFSETOF(NativeExecutable, m_constructor) : OBJECT_OFFSETOF(NativeExecutable, m_function);
-
-    Label nativeCallThunk = align();
-
-    emitPutImmediateToCallFrameHeader(0, RegisterFile::CodeBlock);
-
-#if CPU(X86)
-    // Load caller frame's scope chain into this callframe so that whatever we call can
-    // get to its global data.
-    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
-    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
-    emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
-
-    peek(regT1);
-    emitPutToCallFrameHeader(regT1, RegisterFile::ReturnPC);
-
-    // Calling convention:      f(ecx, edx, ...);
-    // Host function signature: f(ExecState*);
-    move(callFrameRegister, X86Registers::ecx);
-
-    subPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.
-
-    // call the function
-    emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT1);
-    loadPtr(Address(regT1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT1);
-    move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
-    call(Address(regT1, executableOffsetToFunction));
-
-    addPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister);
-
-#elif CPU(ARM)
-    // Load caller frame's scope chain into this callframe so that whatever we call can
-    // get to its global data.
-    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
-    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT2);
-    emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
-
-    preserveReturnAddressAfterCall(regT3); // Callee preserved
-    emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
-
-    // Calling convention:      f(r0 == regT0, r1 == regT1, ...);
-    // Host function signature: f(ExecState*);
-    move(callFrameRegister, ARMRegisters::r0);
-
-    // call the function
-    emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, ARMRegisters::r1);
-    move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
-    loadPtr(Address(ARMRegisters::r1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
-    call(Address(regT2, executableOffsetToFunction));
-
-    restoreReturnAddressBeforeReturn(regT3);
-#elif CPU(SH4)
-    // Load caller frame's scope chain into this callframe so that whatever we call can
-    // get to its global data.
-    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
-    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT2);
-    emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
-
-    preserveReturnAddressAfterCall(regT3); // Callee preserved
-    emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
-
-    // Calling convention:      f(r0 == regT4, r1 == regT5, ...);
-    // Host function signature: f(ExecState*);
-    move(callFrameRegister, regT4);
-
-    emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT5);
-    move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
-    loadPtr(Address(regT5, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
-
-    call(Address(regT2, executableOffsetToFunction), regT0);
-    restoreReturnAddressBeforeReturn(regT3);
-#elif CPU(MIPS)
-    // Load caller frame's scope chain into this callframe so that whatever we call can
-    // get to its global data.
-    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
-    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
-    emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
-
-    preserveReturnAddressAfterCall(regT3); // Callee preserved
-    emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
-
-    // Calling convention:      f(a0, a1, a2, a3);
-    // Host function signature: f(ExecState*);
-
-    // Allocate stack space for 16 bytes (8-byte aligned)
-    // 16 bytes (unused) for 4 arguments
-    subPtr(TrustedImm32(16), stackPointerRegister);
-
-    // Setup arg0
-    move(callFrameRegister, MIPSRegisters::a0);
-
-    // Call
-    emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, MIPSRegisters::a2);
-    loadPtr(Address(MIPSRegisters::a2, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
-    move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
-    call(Address(regT2, executableOffsetToFunction));
-
-    // Restore stack space
-    addPtr(TrustedImm32(16), stackPointerRegister);
-
-    restoreReturnAddressBeforeReturn(regT3);
-
-#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
-#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
-#else
-    UNUSED_PARAM(executableOffsetToFunction);
-    breakpoint();
-#endif // CPU(X86)
-
-    // Check for an exception
-    Jump sawException = branch32(NotEqual, AbsoluteAddress(reinterpret_cast<char*>(&globalData->exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
-
-    // Return.
-    ret();
-
-    // Handle an exception
-    sawException.link(this);
-
-    // Grab the return address.
-    preserveReturnAddressAfterCall(regT1);
-
-    move(TrustedImmPtr(&globalData->exceptionLocation), regT2);
-    storePtr(regT1, regT2);
-    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
-
-    // Set the return address.
-    move(TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT1);
-    restoreReturnAddressBeforeReturn(regT1);
-
-    ret();
-
-    return nativeCallThunk;
-}
-
-JIT::CodePtr JIT::privateCompileCTINativeCall(PassRefPtr<ExecutablePool> executablePool, JSGlobalData* globalData, NativeFunction func)
+JIT::CodeRef JIT::privateCompileCTINativeCall(VM* vm, NativeFunction func)
 {
     Call nativeCall;
-    Label nativeCallThunk = align();
 
-    emitPutImmediateToCallFrameHeader(0, RegisterFile::CodeBlock);
+    emitFunctionPrologue();
+    emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
+    storePtr(callFrameRegister, &m_vm->topCallFrame);
 
 #if CPU(X86)
-    // Load caller frame's scope chain into this callframe so that whatever we call can
-    // get to its global data.
-    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
-    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
-    emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
-
-    peek(regT1);
-    emitPutToCallFrameHeader(regT1, RegisterFile::ReturnPC);
-
     // Calling convention:      f(ecx, edx, ...);
     // Host function signature: f(ExecState*);
     move(callFrameRegister, X86Registers::ecx);
 
-    subPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.
-
-    move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
-
-    // call the function
-    nativeCall = call();
-
-    addPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister);
-
-#elif CPU(ARM)
-    // Load caller frame's scope chain into this callframe so that whatever we call can
-    // get to its global data.
-    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
-    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT2);
-    emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
-
-    preserveReturnAddressAfterCall(regT3); // Callee preserved
-    emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
-
-    // Calling convention:      f(r0 == regT0, r1 == regT1, ...);
-    // Host function signature: f(ExecState*);
-    move(callFrameRegister, ARMRegisters::r0);
-
-    emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, ARMRegisters::r1);
-    move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
-    loadPtr(Address(ARMRegisters::r1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+    subPtr(TrustedImm32(8), stackPointerRegister); // Align stack for call.
+    storePtr(X86Registers::ecx, Address(stackPointerRegister));
 
     // call the function
     nativeCall = call();
 
-    restoreReturnAddressBeforeReturn(regT3);
-
-#elif CPU(MIPS)
-    // Load caller frame's scope chain into this callframe so that whatever we call can
-    // get to its global data.
-    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
-    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
-    emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
-
-    preserveReturnAddressAfterCall(regT3); // Callee preserved
-    emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
-
-    // Calling convention:      f(a0, a1, a2, a3);
-    // Host function signature: f(ExecState*);
+    addPtr(TrustedImm32(8), stackPointerRegister);
 
-    // Allocate stack space for 16 bytes (8-byte aligned)
-    // 16 bytes (unused) for 4 arguments
+#elif CPU(ARM) || CPU(SH4) || CPU(MIPS)
+#if CPU(MIPS)
+    // Allocate stack space for (unused) 16 bytes (8-byte aligned) for 4 arguments.
     subPtr(TrustedImm32(16), stackPointerRegister);
+#endif
 
-    // Setup arg0
-    move(callFrameRegister, MIPSRegisters::a0);
+    // Calling convention is f(argumentGPR0, argumentGPR1, ...).
+    // Host function signature is f(ExecState*).
+    move(callFrameRegister, argumentGPR0);
+
+    emitGetFromCallFrameHeaderPtr(JSStack::Callee, argumentGPR1);
+    loadPtr(Address(argumentGPR1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
 
-    // Call
-    emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, MIPSRegisters::a2);
-    loadPtr(Address(MIPSRegisters::a2, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
-    move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
-
     // call the function
     nativeCall = call();
 
+#if CPU(MIPS)
     // Restore stack space
     addPtr(TrustedImm32(16), stackPointerRegister);
+#endif
 
     restoreReturnAddressBeforeReturn(regT3);
 
-#elif CPU(SH4)
-    // Load caller frame's scope chain into this callframe so that whatever we call can
-    // get to its global data.
-    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
-    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT2);
-    emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
-
-    preserveReturnAddressAfterCall(regT3); // Callee preserved
-    emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
-
-    // Calling convention:      f(r0 == regT4, r1 == regT5, ...);
-    // Host function signature: f(ExecState*);
-    move(callFrameRegister, regT4);
-
-    emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT5);
-    move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
-    loadPtr(Address(regT5, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
-
-    // call the function
-    nativeCall = call();
-
-    restoreReturnAddressBeforeReturn(regT3);
-#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
-#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
 #else
-    breakpoint();
+#error "JIT not supported on this platform."
+    abortWithReason(JITNotSupported);
 #endif // CPU(X86)
 
     // Check for an exception
-    Jump sawException = branch32(NotEqual, AbsoluteAddress(reinterpret_cast<char*>(&globalData->exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
+    Jump sawException = branch32(NotEqual, AbsoluteAddress(vm->addressOfException()), TrustedImm32(0));
 
+    emitFunctionEpilogue();
     // Return.
     ret();
 
     // Handle an exception
     sawException.link(this);
 
-    // Grab the return address.
-    preserveReturnAddressAfterCall(regT1);
+    storePtr(callFrameRegister, &m_vm->topCallFrame);
 
-    move(TrustedImmPtr(&globalData->exceptionLocation), regT2);
-    storePtr(regT1, regT2);
-    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
+#if CPU(X86)
+    addPtr(TrustedImm32(-4), stackPointerRegister);
+    loadPtr(Address(callFrameRegister), X86Registers::ecx);
+    push(X86Registers::ecx);
+#else
+    loadPtr(Address(callFrameRegister), argumentGPR0);
+#endif
+    move(TrustedImmPtr(FunctionPtr(operationVMHandleException).value()), regT3);
+    call(regT3);
 
-    // Set the return address.
-    move(TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT1);
-    restoreReturnAddressBeforeReturn(regT1);
+#if CPU(X86)
+    addPtr(TrustedImm32(8), stackPointerRegister);
+#endif
 
-    ret();
+    jumpToExceptionHandler();
 
     // All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
-    LinkBuffer patchBuffer(*m_globalData, this, executablePool);
+    LinkBuffer patchBuffer(*m_vm, *this, GLOBAL_THUNK_ID);
 
     patchBuffer.link(nativeCall, FunctionPtr(func));
-    patchBuffer.finalizeCode();
-
-    return patchBuffer.trampolineAt(nativeCallThunk);
+    return FINALIZE_CODE(patchBuffer, ("JIT CTI native call"));
 }
 
 void JIT::emit_op_mov(Instruction* currentInstruction)
 {
-    unsigned dst = currentInstruction[1].u.operand;
-    unsigned src = currentInstruction[2].u.operand;
-
+    int dst = currentInstruction[1].u.operand;
+    int src = currentInstruction[2].u.operand;
+
     if (m_codeBlock->isConstantRegisterIndex(src))
         emitStore(dst, getConstantOperand(src));
     else {
         emitLoad(src, regT1, regT0);
         emitStore(dst, regT1, regT0);
-        map(m_bytecodeOffset + OPCODE_LENGTH(op_mov), dst, regT1, regT0);
     }
 }
 
 void JIT::emit_op_end(Instruction* currentInstruction)
 {
-    ASSERT(returnValueRegister != callFrameRegister);
+    ASSERT(returnValueGPR != callFrameRegister);
     emitLoad(currentInstruction[1].u.operand, regT1, regT0);
-    restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
+    emitFunctionEpilogue();
     ret();
 }
 
@@ -502,59 +159,33 @@ void JIT::emit_op_jmp(Instruction* currentInstruction)
     addJump(jump(), target);
 }
 
-void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
-{
-    unsigned op1 = currentInstruction[1].u.operand;
-    unsigned op2 = currentInstruction[2].u.operand;
-    unsigned target = currentInstruction[3].u.operand;
-
-    emitTimeoutCheck();
-
-    if (isOperandConstantImmediateInt(op1)) {
-        emitLoad(op2, regT1, regT0);
-        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
-        addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op1).asInt32())), target);
-        return;
-    }
-
-    if (isOperandConstantImmediateInt(op2)) {
-        emitLoad(op1, regT1, regT0);
-        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
-        addJump(branch32(LessThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
-        return;
-    }
-
-    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
-    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
-    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
-    addJump(branch32(LessThanOrEqual, regT0, regT2), target);
-}
-
-void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_op_new_object(Instruction* currentInstruction)
 {
-    unsigned op1 = currentInstruction[1].u.operand;
-    unsigned op2 = currentInstruction[2].u.operand;
-    unsigned target = currentInstruction[3].u.operand;
+    Structure* structure = currentInstruction[3].u.objectAllocationProfile->structure();
+    size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
+    MarkedAllocator* allocator = &m_vm->heap.allocatorForObjectWithoutDestructor(allocationSize);
 
-    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
-        linkSlowCase(iter); // int32 check
-    linkSlowCase(iter); // int32 check
+    RegisterID resultReg = regT0;
+    RegisterID allocatorReg = regT1;
+    RegisterID scratchReg = regT2;
 
-    JITStubCall stubCall(this, cti_op_loop_if_lesseq);
-    stubCall.addArgument(op1);
-    stubCall.addArgument(op2);
-    stubCall.call();
-    emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
+    move(TrustedImmPtr(allocator), allocatorReg);
+    emitAllocateJSObject(allocatorReg, TrustedImmPtr(structure), resultReg, scratchReg);
+    emitStoreCell(currentInstruction[1].u.operand, resultReg);
 }
 
-void JIT::emit_op_new_object(Instruction* currentInstruction)
+void JIT::emitSlow_op_new_object(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    JITStubCall(this, cti_op_new_object).call(currentInstruction[1].u.operand);
+    linkSlowCase(iter);
+    int dst = currentInstruction[1].u.operand;
+    Structure* structure = currentInstruction[3].u.objectAllocationProfile->structure();
+    callOperation(operationNewObject, structure);
+    emitStoreCell(dst, returnValueGPR);
 }
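
[Editor's note — illustration, not part of the diff.] The replacement emit_op_new_object above bakes the Structure and a MarkedAllocator into the emitted code, and only calls operationNewObject when the inline allocation fails. A sketch of that fast-path/slow-path split in plain C++; the free-list shape here is hypothetical and much simpler than JSC's real MarkedAllocator:

    struct FreeCell { FreeCell* next; };

    struct BumpAllocator {                 // stand-in for MarkedAllocator
        FreeCell* freeListHead = nullptr;
        void* tryAllocate() {              // what emitAllocateJSObject inlines
            FreeCell* cell = freeListHead;
            if (!cell)
                return nullptr;            // empty free list: take the slow case
            freeListHead = cell->next;     // pop one cell: a load and a store
            return cell;
        }
    };

    // Stand-in for operationNewObject; a real VM would refill the free
    // list or run a collection here.
    void* slowPathAllocate(BumpAllocator&) { return nullptr; }

    void* newObjectSketch(BumpAllocator& allocator)
    {
        if (void* fast = allocator.tryAllocate())
            return fast;                   // inline path, no call
        return slowPathAllocate(allocator); // out-of-line call, like the linked slow case
    }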
 
 void JIT::emit_op_check_has_instance(Instruction* currentInstruction)
 {
-    unsigned baseVal = currentInstruction[1].u.operand;
+    int baseVal = currentInstruction[3].u.operand;
 
     emitLoadPayload(baseVal, regT0);
 
@@ -562,21 +193,18 @@ void JIT::emit_op_check_has_instance(Instruction* currentInstruction)
     emitJumpSlowCaseIfNotJSCell(baseVal);
 
     // Check that baseVal 'ImplementsHasInstance'.
-    loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
-    addSlowCase(branchTest8(Zero, Address(regT0, Structure::typeInfoFlagsOffset()), TrustedImm32(ImplementsHasInstance)));
+    addSlowCase(branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance)));
 }
 
 void JIT::emit_op_instanceof(Instruction* currentInstruction)
 {
-    unsigned dst = currentInstruction[1].u.operand;
-    unsigned value = currentInstruction[2].u.operand;
-    unsigned baseVal = currentInstruction[3].u.operand;
-    unsigned proto = currentInstruction[4].u.operand;
+    int dst = currentInstruction[1].u.operand;
+    int value = currentInstruction[2].u.operand;
+    int proto = currentInstruction[3].u.operand;
 
     // Load the operands into registers.
     // We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
     emitLoadPayload(value, regT2);
-    emitLoadPayload(baseVal, regT0);
     emitLoadPayload(proto, regT1);
 
     // Check that proto are cells. baseVal must be a cell - this is checked by op_check_has_instance.
@@ -584,13 +212,7 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
     emitJumpSlowCaseIfNotJSCell(proto);
 
     // Check that prototype is an object
-    loadPtr(Address(regT1, JSCell::structureOffset()), regT3);
-    addSlowCase(branch8(NotEqual, Address(regT3, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType)));
-
-    // Fixme: this check is only needed because the JSC API allows HasInstance to be overridden; we should deprecate this.
-    // Check that baseVal 'ImplementsDefaultHasInstance'.
-    loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
-    addSlowCase(branchTest8(Zero, Address(regT0, Structure::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance)));
+    addSlowCase(emitJumpIfCellNotObject(regT1));
 
     // Optimistically load the result true, and start looping.
     // Initially, regT1 still contains proto and regT2 still contains value.
@@ -600,7 +222,7 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
 
     // Load the prototype of the cell in regT2.  If this is equal to regT1 - WIN!
     // Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
-    loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
+    loadPtr(Address(regT2, JSCell::structureIDOffset()), regT2);
     load32(Address(regT2, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
     Jump isInstance = branchPtr(Equal, regT2, regT1);
     branchTest32(NonZero, regT2).linkTo(loop, this);
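
[Editor's note — illustration, not part of the diff.] The loop emitted above is a plain prototype-chain walk: load the value's Structure, load that Structure's prototype payload, compare it against proto, and stop when the payload is null (zero). The same logic written as C++ over a toy object model (hypothetical type, not JSC's real classes):

    struct ToyCell { ToyCell* prototype; }; // stands in for JSCell -> Structure -> prototype

    // Mirrors the emitted loop: optimistic "true", fall through to "false" on null.
    bool instanceOfSketch(ToyCell* value, ToyCell* proto)
    {
        for (ToyCell* cur = value->prototype; cur; cur = cur->prototype) {
            if (cur == proto)
                return true;   // the "isInstance" WIN! branch
        }
        return false;          // fell off the chain: store false
    }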
@@ -615,148 +237,117 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
 
 void JIT::emitSlow_op_check_has_instance(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    unsigned baseVal = currentInstruction[1].u.operand;
+    int dst = currentInstruction[1].u.operand;
+    int value = currentInstruction[2].u.operand;
+    int baseVal = currentInstruction[3].u.operand;
 
     linkSlowCaseIfNotJSCell(iter, baseVal);
     linkSlowCase(iter);
 
-    JITStubCall stubCall(this, cti_op_check_has_instance);
-    stubCall.addArgument(baseVal);
-    stubCall.call();
+    emitLoad(value, regT1, regT0);
+    emitLoad(baseVal, regT3, regT2);
+    callOperation(operationCheckHasInstance, dst, regT1, regT0, regT3, regT2);
+
+    emitJumpSlowToHot(jump(), currentInstruction[4].u.operand);
 }
 
 void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    unsigned dst = currentInstruction[1].u.operand;
-    unsigned value = currentInstruction[2].u.operand;
-    unsigned baseVal = currentInstruction[3].u.operand;
-    unsigned proto = currentInstruction[4].u.operand;
+    int dst = currentInstruction[1].u.operand;
+    int value = currentInstruction[2].u.operand;
+    int proto = currentInstruction[3].u.operand;
 
     linkSlowCaseIfNotJSCell(iter, value);
     linkSlowCaseIfNotJSCell(iter, proto);
     linkSlowCase(iter);
-    linkSlowCase(iter);
 
-    JITStubCall stubCall(this, cti_op_instanceof);
-    stubCall.addArgument(value);
-    stubCall.addArgument(baseVal);
-    stubCall.addArgument(proto);
-    stubCall.call(dst);
+    emitLoad(value, regT1, regT0);
+    emitLoad(proto, regT3, regT2);
+    callOperation(operationInstanceOf, dst, regT1, regT0, regT3, regT2);
 }
 
-void JIT::emit_op_get_global_var(Instruction* currentInstruction)
+void JIT::emit_op_is_undefined(Instruction* currentInstruction)
 {
     int dst = currentInstruction[1].u.operand;
-    JSGlobalObject* globalObject = m_codeBlock->globalObject();
-    ASSERT(globalObject->isGlobalObject());
-    int index = currentInstruction[2].u.operand;
-
-    loadPtr(&globalObject->m_registers, regT2);
-
-    emitLoad(index, regT1, regT0, regT2);
-    emitStore(dst, regT1, regT0);
-    map(m_bytecodeOffset + OPCODE_LENGTH(op_get_global_var), dst, regT1, regT0);
-}
-
-void JIT::emit_op_put_global_var(Instruction* currentInstruction)
-{
-    JSGlobalObject* globalObject = m_codeBlock->globalObject();
-    ASSERT(globalObject->isGlobalObject());
-    int index = currentInstruction[1].u.operand;
     int value = currentInstruction[2].u.operand;
-
+    
     emitLoad(value, regT1, regT0);
+    Jump isCell = branch32(Equal, regT1, TrustedImm32(JSValue::CellTag));
 
-    loadPtr(&globalObject->m_registers, regT2);
-    emitStore(index, regT1, regT0, regT2);
-    map(m_bytecodeOffset + OPCODE_LENGTH(op_put_global_var), value, regT1, regT0);
+    compare32(Equal, regT1, TrustedImm32(JSValue::UndefinedTag), regT0);
+    Jump done = jump();
+    
+    isCell.link(this);
+    Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+    move(TrustedImm32(0), regT0);
+    Jump notMasqueradesAsUndefined = jump();
+
+    isMasqueradesAsUndefined.link(this);
+    loadPtr(Address(regT0, JSCell::structureIDOffset()), regT1);
+    move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
+    loadPtr(Address(regT1, Structure::globalObjectOffset()), regT1);
+    compare32(Equal, regT0, regT1, regT0);
+
+    notMasqueradesAsUndefined.link(this);
+    done.link(this);
+    emitStoreBool(dst, regT0);
 }
 
-void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
+void JIT::emit_op_is_boolean(Instruction* currentInstruction)
 {
     int dst = currentInstruction[1].u.operand;
-    int index = currentInstruction[2].u.operand;
-    int skip = currentInstruction[3].u.operand;
-
-    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
-    bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
-    ASSERT(skip || !checkTopLevel);
-    if (checkTopLevel && skip--) {
-        Jump activationNotCreated;
-        if (checkTopLevel)
-            activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), TrustedImm32(JSValue::EmptyValueTag));
-        loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
-        activationNotCreated.link(this);
-    }
-    while (skip--)
-        loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
-
-    loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
-    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject, m_registers)), regT2);
-
-    emitLoad(index, regT1, regT0, regT2);
-    emitStore(dst, regT1, regT0);
-    map(m_bytecodeOffset + OPCODE_LENGTH(op_get_scoped_var), dst, regT1, regT0);
+    int value = currentInstruction[2].u.operand;
+    
+    emitLoadTag(value, regT0);
+    compare32(Equal, regT0, TrustedImm32(JSValue::BooleanTag), regT0);
+    emitStoreBool(dst, regT0);
 }
 
-void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
+void JIT::emit_op_is_number(Instruction* currentInstruction)
 {
-    int index = currentInstruction[1].u.operand;
-    int skip = currentInstruction[2].u.operand;
-    int value = currentInstruction[3].u.operand;
-
-    emitLoad(value, regT1, regT0);
-
-    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
-    bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
-    ASSERT(skip || !checkTopLevel);
-    if (checkTopLevel && skip--) {
-        Jump activationNotCreated;
-        if (checkTopLevel)
-            activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), TrustedImm32(JSValue::EmptyValueTag));
-        loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
-        activationNotCreated.link(this);
-    }
-    while (skip--)
-        loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
-
-    loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
-    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject, m_registers)), regT2);
-
-    emitStore(index, regT1, regT0, regT2);
-    map(m_bytecodeOffset + OPCODE_LENGTH(op_put_scoped_var), value, regT1, regT0);
+    int dst = currentInstruction[1].u.operand;
+    int value = currentInstruction[2].u.operand;
+    
+    emitLoadTag(value, regT0);
+    add32(TrustedImm32(1), regT0);
+    compare32(Below, regT0, TrustedImm32(JSValue::LowestTag + 1), regT0);
+    emitStoreBool(dst, regT0);
 }
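
[Editor's note — illustration, not part of the diff.] Several opcodes above lean on the JSVALUE32_64 encoding: every value is a 32-bit payload plus a 32-bit tag, the non-double tags are clustered at the top of the unsigned range, and any tag below LowestTag is the high word of an IEEE double. That is why emit_op_is_number can classify a value with one add and one unsigned compare: adding 1 wraps Int32Tag (0xffffffff) around to 0, so exactly the int32 and double tags land below LowestTag + 1. A C++ sketch; the constants mirror the scheme, not necessarily this revision's exact values:

    #include <cstdint>

    enum Tag : uint32_t {
        Int32Tag   = 0xffffffff,
        BooleanTag = 0xfffffffe,
        // ... null, undefined, cell, empty ...
        LowestTag  = 0xfffffff9, // tags below this are double high-words
    };

    bool isNumberSketch(uint32_t tag)
    {
        // Matches add32(TrustedImm32(1)) + compare32(Below, ..., LowestTag + 1):
        // Int32Tag wraps to 0, double tags stay below the threshold,
        // every other tag lands above it.
        return tag + 1 < uint32_t(LowestTag) + 1;
    }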
-void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
+void JIT::emit_op_is_string(Instruction* currentInstruction)
 {
-    unsigned activation = currentInstruction[1].u.operand;
-    unsigned arguments = currentInstruction[2].u.operand;
-    Jump activationCreated = branch32(NotEqual, tagFor(activation), TrustedImm32(JSValue::EmptyValueTag));
-    Jump argumentsNotCreated = branch32(Equal, tagFor(arguments), TrustedImm32(JSValue::EmptyValueTag));
-    activationCreated.link(this);
-    JITStubCall stubCall(this, cti_op_tear_off_activation);
-    stubCall.addArgument(currentInstruction[1].u.operand);
-    stubCall.addArgument(unmodifiedArgumentsRegister(currentInstruction[2].u.operand));
-    stubCall.call();
-    argumentsNotCreated.link(this);
+    int dst = currentInstruction[1].u.operand;
+    int value = currentInstruction[2].u.operand;
+
+    emitLoad(value, regT1, regT0);
+    Jump isNotCell = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
+
+    compare8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType), regT0);
+    Jump done = jump();
+
+    isNotCell.link(this);
+    move(TrustedImm32(0), regT0);
+
+    done.link(this);
+    emitStoreBool(dst, regT0);
 }
 
-void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction)
+void JIT::emit_op_is_object(Instruction* currentInstruction)
 {
     int dst = currentInstruction[1].u.operand;
+    int value = currentInstruction[2].u.operand;
 
-    Jump argsNotCreated = branch32(Equal, tagFor(unmodifiedArgumentsRegister(dst)), TrustedImm32(JSValue::EmptyValueTag));
-    JITStubCall stubCall(this, cti_op_tear_off_arguments);
-    stubCall.addArgument(unmodifiedArgumentsRegister(dst));
-    stubCall.call();
-    argsNotCreated.link(this);
-}
+    emitLoad(value, regT1, regT0);
+    Jump isNotCell = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
 
-void JIT::emit_op_resolve(Instruction* currentInstruction)
-{
-    JITStubCall stubCall(this, cti_op_resolve);
-    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
-    stubCall.call(currentInstruction[1].u.operand);
+    compare8(AboveOrEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType), regT0);
+    Jump done = jump();
+
+    isNotCell.link(this);
+    move(TrustedImm32(0), regT0);
+
+    done.link(this);
+    emitStoreBool(dst, regT0);
 }
 
 void JIT::emit_op_to_primitive(Instruction* currentInstruction)
@@ -767,100 +358,31 @@ void JIT::emit_op_to_primitive(Instruction* currentInstruction)
     emitLoad(src, regT1, regT0);
 
     Jump isImm = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
-    addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr)));
+    addSlowCase(emitJumpIfCellObject(regT0));
     isImm.link(this);
 
     if (dst != src)
         emitStore(dst, regT1, regT0);
-    map(m_bytecodeOffset + OPCODE_LENGTH(op_to_primitive), dst, regT1, regT0);
 }
 
 void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    int dst = currentInstruction[1].u.operand;
-
     linkSlowCase(iter);
 
-    JITStubCall stubCall(this, cti_op_to_primitive);
-    stubCall.addArgument(regT1, regT0);
-    stubCall.call(dst);
+    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_primitive);
+    slowPathCall.call();
 }
 
 void JIT::emit_op_strcat(Instruction* currentInstruction)
 {
-    JITStubCall stubCall(this, cti_op_strcat);
-    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
-    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
-    stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_resolve_base(Instruction* currentInstruction)
-{
-    JITStubCall stubCall(this, currentInstruction[3].u.operand ? cti_op_resolve_base_strict_put : cti_op_resolve_base);
-    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
-    stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_ensure_property_exists(Instruction* currentInstruction)
-{
-    JITStubCall stubCall(this, cti_op_ensure_property_exists);
-    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
-    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
-    stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
-{
-    JITStubCall stubCall(this, cti_op_resolve_skip);
-    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
-    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
-    stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_resolve_global(Instruction* currentInstruction, bool dynamic)
-{
-    // FIXME: Optimize to use patching instead of so many memory accesses.
-
-    unsigned dst = currentInstruction[1].u.operand;
-    void* globalObject = m_codeBlock->globalObject();
-
-    unsigned currentIndex = m_globalResolveInfoIndex++;
-    GlobalResolveInfo* resolveInfoAddress = &m_codeBlock->globalResolveInfo(currentIndex);
-
-
-    // Verify structure.
-    move(TrustedImmPtr(globalObject), regT0);
-    move(TrustedImmPtr(resolveInfoAddress), regT3);
-    loadPtr(Address(regT3, OBJECT_OFFSETOF(GlobalResolveInfo, structure)), regT1);
-    addSlowCase(branchPtr(NotEqual, regT1, Address(regT0, JSCell::structureOffset())));
-
-    // Load property.
-    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSGlobalObject, m_propertyStorage)), regT2);
-    load32(Address(regT3, OBJECT_OFFSETOF(GlobalResolveInfo, offset)), regT3);
-    load32(BaseIndex(regT2, regT3, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
-    load32(BaseIndex(regT2, regT3, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
-    emitStore(dst, regT1, regT0);
-    map(m_bytecodeOffset + (dynamic ? OPCODE_LENGTH(op_resolve_global_dynamic) : OPCODE_LENGTH(op_resolve_global)), dst, regT1, regT0);
-}
-
-void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
-    unsigned dst = currentInstruction[1].u.operand;
-    Identifier* ident = &m_codeBlock->identifier(currentInstruction[2].u.operand);
-
-    unsigned currentIndex = m_globalResolveInfoIndex++;
-
-    linkSlowCase(iter);
-    JITStubCall stubCall(this, cti_op_resolve_global);
-    stubCall.addArgument(TrustedImmPtr(ident));
-    stubCall.addArgument(Imm32(currentIndex));
-    stubCall.call(dst);
+    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_strcat);
+    slowPathCall.call();
 }
 
 void JIT::emit_op_not(Instruction* currentInstruction)
 {
-    unsigned dst = currentInstruction[1].u.operand;
-    unsigned src = currentInstruction[2].u.operand;
+    int dst = currentInstruction[1].u.operand;
+    int src = currentInstruction[2].u.operand;
 
     emitLoadTag(src, regT0);
 
@@ -873,19 +395,15 @@ void JIT::emit_op_not(Instruction* currentInstruction)
 
 void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    unsigned dst = currentInstruction[1].u.operand;
-    unsigned src = currentInstruction[2].u.operand;
-
     linkSlowCase(iter);
 
-    JITStubCall stubCall(this, cti_op_not);
-    stubCall.addArgument(src);
-    stubCall.call(dst);
+    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_not);
+    slowPathCall.call();
 }
 
 void JIT::emit_op_jfalse(Instruction* currentInstruction)
 {
-    unsigned cond = currentInstruction[1].u.operand;
+    int cond = currentInstruction[1].u.operand;
     unsigned target = currentInstruction[2].u.operand;
 
     emitLoad(cond, regT1, regT0);
@@ -897,14 +415,14 @@ void JIT::emit_op_jfalse(Instruction* currentInstruction)
 
 void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    unsigned cond = currentInstruction[1].u.operand;
+    int cond = currentInstruction[1].u.operand;
     unsigned target = currentInstruction[2].u.operand;
 
     linkSlowCase(iter);
 
     if (supportsFloatingPoint()) {
         // regT1 contains the tag from the hot path.
-        Jump notNumber = branch32(Above, regT1, Imm32(JSValue::LowestTag));
+        Jump notNumber = branch32(Above, regT1, TrustedImm32(JSValue::LowestTag));
 
         emitLoadDouble(cond, fpRegT0);
         emitJumpSlowToHot(branchDoubleZeroOrNaN(fpRegT0, fpRegT1), target);
@@ -913,15 +431,13 @@ void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 
-    unsigned cond = currentInstruction[1].u.operand;
+    int cond = currentInstruction[1].u.operand;
     unsigned target = currentInstruction[2].u.operand;
 
     linkSlowCase(iter);
 
     if (supportsFloatingPoint()) {
         // regT1 contains the tag from the hot path.
-        Jump notNumber = branch32(Above, regT1, Imm32(JSValue::LowestTag));
+        Jump notNumber = branch32(Above, regT1, TrustedImm32(JSValue::LowestTag));
 
         emitLoadDouble(cond, fpRegT0);
         emitJumpSlowToHot(branchDoubleNonZero(fpRegT0, fpRegT1), target);
@@ -949,50 +465,48 @@ void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 
 void JIT::emit_op_jeq_null(Instruction* currentInstruction)
 {
-    unsigned src = currentInstruction[1].u.operand;
+    int src = currentInstruction[1].u.operand;
     unsigned target = currentInstruction[2].u.operand;
 
     emitLoad(src, regT1, regT0);
 
     Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
 
-    // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
-    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
-    addJump(branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
-
+    Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+    loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2);
+    move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
+    addJump(branchPtr(Equal, Address(regT2, Structure::globalObjectOffset()), regT0), target);
+    Jump masqueradesGlobalObjectIsForeign = jump();
 
     // Now handle the immediate cases - undefined & null
     isImmediate.link(this);
-    ASSERT((JSValue::UndefinedTag + 1 == JSValue::NullTag) && (JSValue::NullTag & 0x1));
     or32(TrustedImm32(1), regT1);
     addJump(branch32(Equal, regT1, TrustedImm32(JSValue::NullTag)), target);
 
-    wasNotImmediate.link(this);
+    isNotMasqueradesAsUndefined.link(this);
+    masqueradesGlobalObjectIsForeign.link(this);
 }
 
 void JIT::emit_op_jneq_null(Instruction* currentInstruction)
 {
-    unsigned src = currentInstruction[1].u.operand;
+    int src = currentInstruction[1].u.operand;
     unsigned target = currentInstruction[2].u.operand;
 
     emitLoad(src, regT1, regT0);
 
     Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
 
-    // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
-    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
-    addJump(branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
-
+    addJump(branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
+    loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2);
+    move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
+    addJump(branchPtr(NotEqual, Address(regT2, Structure::globalObjectOffset()), regT0), target);
     Jump wasNotImmediate = jump();
 
     // Now handle the immediate cases - undefined & null
@@ -1007,34 +521,20 @@ void JIT::emit_op_jneq_null(Instruction* currentInstruction)
 
 void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
 {
-    unsigned src = currentInstruction[1].u.operand;
-    JSCell* ptr = currentInstruction[2].u.jsCell.get();
+    int src = currentInstruction[1].u.operand;
+    Special::Pointer ptr = currentInstruction[2].u.specialPointer;
     unsigned target = currentInstruction[3].u.operand;
 
     emitLoad(src, regT1, regT0);
     addJump(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)), target);
-    addJump(branchPtr(NotEqual, regT0, TrustedImmPtr(ptr)), target);
-}
-
-void JIT::emit_op_jsr(Instruction* currentInstruction)
-{
-    int retAddrDst = currentInstruction[1].u.operand;
-    int target = currentInstruction[2].u.operand;
-    DataLabelPtr storeLocation = storePtrWithPatch(TrustedImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
-    addJump(jump(), target);
-    m_jsrSites.append(JSRInfo(storeLocation, label()));
-}
-
-void JIT::emit_op_sret(Instruction* currentInstruction)
-{
-    jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand));
+    addJump(branchPtr(NotEqual, regT0, TrustedImmPtr(actualPointerFor(m_codeBlock, ptr))), target);
 }
 
 void JIT::emit_op_eq(Instruction* currentInstruction)
 {
-    unsigned dst = currentInstruction[1].u.operand;
-    unsigned src1 = currentInstruction[2].u.operand;
-    unsigned src2 = currentInstruction[3].u.operand;
+    int dst = currentInstruction[1].u.operand;
+    int src1 = currentInstruction[2].u.operand;
+    int src2 = currentInstruction[3].u.operand;
 
     emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
     addSlowCase(branch32(NotEqual, regT1, regT3));
@@ -1048,9 +548,9 @@ void JIT::emit_op_eq(Instruction* currentInstruction) void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector::iterator& iter) { - unsigned dst = currentInstruction[1].u.operand; - unsigned op1 = currentInstruction[2].u.operand; - unsigned op2 = currentInstruction[3].u.operand; + int dst = currentInstruction[1].u.operand; + int op1 = currentInstruction[2].u.operand; + int op2 = currentInstruction[3].u.operand; JumpList storeResult; JumpList genericCase; @@ -1058,33 +558,29 @@ void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector: genericCase.append(getSlowCase(iter)); // tags not equal linkSlowCase(iter); // tags equal and JSCell - genericCase.append(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr))); - genericCase.append(branchPtr(NotEqual, Address(regT2), TrustedImmPtr(m_globalData->jsStringVPtr))); + genericCase.append(branchPtr(NotEqual, Address(regT0, JSCell::structureIDOffset()), TrustedImmPtr(m_vm->stringStructure.get()))); + genericCase.append(branchPtr(NotEqual, Address(regT2, JSCell::structureIDOffset()), TrustedImmPtr(m_vm->stringStructure.get()))); // String case. - JITStubCall stubCallEqStrings(this, cti_op_eq_strings); - stubCallEqStrings.addArgument(regT0); - stubCallEqStrings.addArgument(regT2); - stubCallEqStrings.call(); + callOperation(operationCompareStringEq, regT0, regT2); storeResult.append(jump()); // Generic case. genericCase.append(getSlowCase(iter)); // doubles genericCase.link(this); - JITStubCall stubCallEq(this, cti_op_eq); - stubCallEq.addArgument(op1); - stubCallEq.addArgument(op2); - stubCallEq.call(regT0); + emitLoad(op1, regT1, regT0); + emitLoad(op2, regT3, regT2); + callOperation(operationCompareEq, regT1, regT0, regT3, regT2); storeResult.link(this); - emitStoreBool(dst, regT0); + emitStoreBool(dst, returnValueGPR); } void JIT::emit_op_neq(Instruction* currentInstruction) { - unsigned dst = currentInstruction[1].u.operand; - unsigned src1 = currentInstruction[2].u.operand; - unsigned src2 = currentInstruction[3].u.operand; + int dst = currentInstruction[1].u.operand; + int src1 = currentInstruction[2].u.operand; + int src2 = currentInstruction[3].u.operand; emitLoad2(src1, regT1, regT0, src2, regT3, regT2); addSlowCase(branch32(NotEqual, regT1, regT3)); @@ -1098,7 +594,7 @@ void JIT::emit_op_neq(Instruction* currentInstruction) void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector::iterator& iter) { - unsigned dst = currentInstruction[1].u.operand; + int dst = currentInstruction[1].u.operand; JumpList storeResult; JumpList genericCase; @@ -1106,49 +602,47 @@ void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector genericCase.append(getSlowCase(iter)); // tags not equal linkSlowCase(iter); // tags equal and JSCell - genericCase.append(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr))); - genericCase.append(branchPtr(NotEqual, Address(regT2), TrustedImmPtr(m_globalData->jsStringVPtr))); + genericCase.append(branchPtr(NotEqual, Address(regT0, JSCell::structureIDOffset()), TrustedImmPtr(m_vm->stringStructure.get()))); + genericCase.append(branchPtr(NotEqual, Address(regT2, JSCell::structureIDOffset()), TrustedImmPtr(m_vm->stringStructure.get()))); // String case. - JITStubCall stubCallEqStrings(this, cti_op_eq_strings); - stubCallEqStrings.addArgument(regT0); - stubCallEqStrings.addArgument(regT2); - stubCallEqStrings.call(regT0); + callOperation(operationCompareStringEq, regT0, regT2); storeResult.append(jump()); // Generic case. 
genericCase.append(getSlowCase(iter)); // doubles genericCase.link(this); - JITStubCall stubCallEq(this, cti_op_eq); - stubCallEq.addArgument(regT1, regT0); - stubCallEq.addArgument(regT3, regT2); - stubCallEq.call(regT0); + callOperation(operationCompareEq, regT1, regT0, regT3, regT2); storeResult.link(this); - xor32(TrustedImm32(0x1), regT0); - emitStoreBool(dst, regT0); + xor32(TrustedImm32(0x1), returnValueGPR); + emitStoreBool(dst, returnValueGPR); } void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type) { - unsigned dst = currentInstruction[1].u.operand; - unsigned src1 = currentInstruction[2].u.operand; - unsigned src2 = currentInstruction[3].u.operand; + int dst = currentInstruction[1].u.operand; + int src1 = currentInstruction[2].u.operand; + int src2 = currentInstruction[3].u.operand; - emitLoadTag(src1, regT0); - emitLoadTag(src2, regT1); + emitLoad2(src1, regT1, regT0, src2, regT3, regT2); + + // Bail if the tags differ, or are double. + addSlowCase(branch32(NotEqual, regT1, regT3)); + addSlowCase(branch32(Below, regT1, TrustedImm32(JSValue::LowestTag))); - // Jump to a slow case if either operand is double, or if both operands are - // cells and/or Int32s. - move(regT0, regT2); - and32(regT1, regT2); - addSlowCase(branch32(Below, regT2, TrustedImm32(JSValue::LowestTag))); - addSlowCase(branch32(AboveOrEqual, regT2, TrustedImm32(JSValue::CellTag))); + // Jump to a slow case if both are strings or symbols (non object). + Jump notCell = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)); + Jump firstIsObject = emitJumpIfCellObject(regT0); + addSlowCase(emitJumpIfCellNotObject(regT2)); + notCell.link(this); + firstIsObject.link(this); + // Simply compare the payloads. if (type == OpStrictEq) - compare32(Equal, regT0, regT1, regT0); + compare32(Equal, regT0, regT2, regT0); else - compare32(NotEqual, regT0, regT1, regT0); + compare32(NotEqual, regT0, regT2, regT0); emitStoreBool(dst, regT0); } @@ -1160,17 +654,12 @@ void JIT::emit_op_stricteq(Instruction* currentInstruction) void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector::iterator& iter) { - unsigned dst = currentInstruction[1].u.operand; - unsigned src1 = currentInstruction[2].u.operand; - unsigned src2 = currentInstruction[3].u.operand; - + linkSlowCase(iter); linkSlowCase(iter); linkSlowCase(iter); - JITStubCall stubCall(this, cti_op_stricteq); - stubCall.addArgument(src1); - stubCall.addArgument(src2); - stubCall.call(dst); + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_stricteq); + slowPathCall.call(); } void JIT::emit_op_nstricteq(Instruction* currentInstruction) @@ -1180,30 +669,31 @@ void JIT::emit_op_nstricteq(Instruction* currentInstruction) void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector::iterator& iter) { - unsigned dst = currentInstruction[1].u.operand; - unsigned src1 = currentInstruction[2].u.operand; - unsigned src2 = currentInstruction[3].u.operand; - + linkSlowCase(iter); linkSlowCase(iter); linkSlowCase(iter); - JITStubCall stubCall(this, cti_op_nstricteq); - stubCall.addArgument(src1); - stubCall.addArgument(src2); - stubCall.call(dst); + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_nstricteq); + slowPathCall.call(); } void JIT::emit_op_eq_null(Instruction* currentInstruction) { - unsigned dst = currentInstruction[1].u.operand; - unsigned src = currentInstruction[2].u.operand; + int dst = currentInstruction[1].u.operand; + int src = currentInstruction[2].u.operand; emitLoad(src, regT1, 
regT0); Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)); - loadPtr(Address(regT0, JSCell::structureOffset()), regT1); - test8(NonZero, Address(regT1, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined), regT1); + Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); + move(TrustedImm32(0), regT1); + Jump wasNotMasqueradesAsUndefined = jump(); + isMasqueradesAsUndefined.link(this); + loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2); + move(TrustedImmPtr(m_codeBlock->globalObject()), regT0); + loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2); + compare32(Equal, regT0, regT2, regT1); Jump wasNotImmediate = jump(); isImmediate.link(this); @@ -1213,21 +703,28 @@ void JIT::emit_op_eq_null(Instruction* currentInstruction) or32(regT2, regT1); wasNotImmediate.link(this); + wasNotMasqueradesAsUndefined.link(this); emitStoreBool(dst, regT1); } void JIT::emit_op_neq_null(Instruction* currentInstruction) { - unsigned dst = currentInstruction[1].u.operand; - unsigned src = currentInstruction[2].u.operand; + int dst = currentInstruction[1].u.operand; + int src = currentInstruction[2].u.operand; emitLoad(src, regT1, regT0); Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)); - loadPtr(Address(regT0, JSCell::structureOffset()), regT1); - test8(Zero, Address(regT1, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined), regT1); + Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); + move(TrustedImm32(1), regT1); + Jump wasNotMasqueradesAsUndefined = jump(); + isMasqueradesAsUndefined.link(this); + loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2); + move(TrustedImmPtr(m_codeBlock->globalObject()), regT0); + loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2); + compare32(NotEqual, regT0, regT2, regT1); Jump wasNotImmediate = jump(); isImmediate.link(this); @@ -1237,257 +734,152 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction) and32(regT2, regT1); wasNotImmediate.link(this); + wasNotMasqueradesAsUndefined.link(this); emitStoreBool(dst, regT1); } -void JIT::emit_op_resolve_with_base(Instruction* currentInstruction) -{ - JITStubCall stubCall(this, cti_op_resolve_with_base); - stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand))); - stubCall.addArgument(Imm32(currentInstruction[1].u.operand)); - stubCall.call(currentInstruction[2].u.operand); -} - -void JIT::emit_op_new_func_exp(Instruction* currentInstruction) -{ - JITStubCall stubCall(this, cti_op_new_func_exp); - stubCall.addArgument(TrustedImmPtr(m_codeBlock->functionExpr(currentInstruction[2].u.operand))); - stubCall.call(currentInstruction[1].u.operand); -} - void JIT::emit_op_throw(Instruction* currentInstruction) { - unsigned exception = currentInstruction[1].u.operand; - JITStubCall stubCall(this, cti_op_throw); - stubCall.addArgument(exception); - stubCall.call(); - -#ifndef NDEBUG - // cti_op_throw always changes it's return address, - // this point in the code should never be reached. 
- breakpoint(); -#endif + ASSERT(regT0 == returnValueGPR); + emitLoad(currentInstruction[1].u.operand, regT1, regT0); + callOperationNoExceptionCheck(operationThrow, regT1, regT0); + jumpToExceptionHandler(); } -void JIT::emit_op_get_pnames(Instruction* currentInstruction) +void JIT::emit_op_push_with_scope(Instruction* currentInstruction) { int dst = currentInstruction[1].u.operand; - int base = currentInstruction[2].u.operand; - int i = currentInstruction[3].u.operand; - int size = currentInstruction[4].u.operand; - int breakTarget = currentInstruction[5].u.operand; - - JumpList isNotObject; - - emitLoad(base, regT1, regT0); - if (!m_codeBlock->isKnownNotImmediate(base)) - isNotObject.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag))); - if (base != m_codeBlock->thisRegister() || m_codeBlock->isStrictMode()) { - loadPtr(Address(regT0, JSCell::structureOffset()), regT2); - isNotObject.append(branch8(NotEqual, Address(regT2, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType))); - } - - // We could inline the case where you have a valid cache, but - // this call doesn't seem to be hot. - Label isObject(this); - JITStubCall getPnamesStubCall(this, cti_op_get_pnames); - getPnamesStubCall.addArgument(regT0); - getPnamesStubCall.call(dst); - load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3); - store32(TrustedImm32(Int32Tag), intTagFor(i)); - store32(TrustedImm32(0), intPayloadFor(i)); - store32(TrustedImm32(Int32Tag), intTagFor(size)); - store32(regT3, payloadFor(size)); - Jump end = jump(); - - isNotObject.link(this); - addJump(branch32(Equal, regT1, TrustedImm32(JSValue::NullTag)), breakTarget); - addJump(branch32(Equal, regT1, TrustedImm32(JSValue::UndefinedTag)), breakTarget); - JITStubCall toObjectStubCall(this, cti_to_object); - toObjectStubCall.addArgument(regT1, regT0); - toObjectStubCall.call(base); - jump().linkTo(isObject, this); + emitLoad(currentInstruction[2].u.operand, regT1, regT0); + callOperation(operationPushWithScope, dst, regT1, regT0); +} - end.link(this); +void JIT::emit_op_pop_scope(Instruction* currentInstruction) +{ + int scope = currentInstruction[1].u.operand; + callOperation(operationPopScope, scope); } -void JIT::emit_op_next_pname(Instruction* currentInstruction) +void JIT::emit_op_to_number(Instruction* currentInstruction) { int dst = currentInstruction[1].u.operand; - int base = currentInstruction[2].u.operand; - int i = currentInstruction[3].u.operand; - int size = currentInstruction[4].u.operand; - int it = currentInstruction[5].u.operand; - int target = currentInstruction[6].u.operand; - - JumpList callHasProperty; - - Label begin(this); - load32(intPayloadFor(i), regT0); - Jump end = branch32(Equal, regT0, intPayloadFor(size)); - - // Grab key @ i - loadPtr(payloadFor(it), regT1); - loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2); - load32(BaseIndex(regT2, regT0, TimesEight), regT2); - store32(TrustedImm32(JSValue::CellTag), tagFor(dst)); - store32(regT2, payloadFor(dst)); - - // Increment i - add32(TrustedImm32(1), regT0); - store32(regT0, intPayloadFor(i)); - - // Verify that i is valid: - loadPtr(payloadFor(base), regT0); - - // Test base's structure - loadPtr(Address(regT0, JSCell::structureOffset()), regT2); - callHasProperty.append(branchPtr(NotEqual, regT2, Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))))); - - // Test base's prototype chain - loadPtr(Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, 
-    loadPtr(Address(regT3, OBJECT_OFFSETOF(StructureChain, m_vector)), regT3);
-    addJump(branchTestPtr(Zero, Address(regT3)), target);
-
-    Label checkPrototype(this);
-    callHasProperty.append(branch32(Equal, Address(regT2, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::NullTag)));
-    loadPtr(Address(regT2, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
-    loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
-    callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3)));
-    addPtr(TrustedImm32(sizeof(Structure*)), regT3);
-    branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this);
-
-    // Continue loop.
-    addJump(jump(), target);
+    int src = currentInstruction[2].u.operand;

-    // Slow case: Ask the object if i is valid.
-    callHasProperty.link(this);
-    loadPtr(addressFor(dst), regT1);
-    JITStubCall stubCall(this, cti_has_property);
-    stubCall.addArgument(regT0);
-    stubCall.addArgument(regT1);
-    stubCall.call();
+    emitLoad(src, regT1, regT0);

-    // Test for valid key.
-    addJump(branchTest32(NonZero, regT0), target);
-    jump().linkTo(begin, this);
+    Jump isInt32 = branch32(Equal, regT1, TrustedImm32(JSValue::Int32Tag));
+    addSlowCase(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag)));
+    isInt32.link(this);

-    // End of loop.
-    end.link(this);
+    if (src != dst)
+        emitStore(dst, regT1, regT0);
 }

-void JIT::emit_op_push_scope(Instruction* currentInstruction)
+void JIT::emitSlow_op_to_number(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    JITStubCall stubCall(this, cti_op_push_scope);
-    stubCall.addArgument(currentInstruction[1].u.operand);
-    stubCall.call(currentInstruction[1].u.operand);
-}
+    linkSlowCase(iter);

-void JIT::emit_op_pop_scope(Instruction*)
-{
-    JITStubCall(this, cti_op_pop_scope).call();
+    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_number);
+    slowPathCall.call();
 }

-void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
+void JIT::emit_op_to_string(Instruction* currentInstruction)
 {
     int dst = currentInstruction[1].u.operand;
     int src = currentInstruction[2].u.operand;

     emitLoad(src, regT1, regT0);

-    Jump isInt32 = branch32(Equal, regT1, TrustedImm32(JSValue::Int32Tag));
-    addSlowCase(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::EmptyValueTag)));
-    isInt32.link(this);
+    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
+    addSlowCase(branch8(NotEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType)));

     if (src != dst)
         emitStore(dst, regT1, regT0);
-    map(m_bytecodeOffset + OPCODE_LENGTH(op_to_jsnumber), dst, regT1, regT0);
 }

-void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_to_string(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    int dst = currentInstruction[1].u.operand;
-
-    linkSlowCase(iter);
+    linkSlowCase(iter); // Not JSCell.
+    linkSlowCase(iter); // Not JSString.

-    JITStubCall stubCall(this, cti_op_to_jsnumber);
-    stubCall.addArgument(regT1, regT0);
-    stubCall.call(dst);
+    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_string);
+    slowPathCall.call();
 }
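Both fast paths above turn entirely on the JSVALUE32_64 tag word: op_to_number peels off Int32Tag and then treats any tag at or above LowestTag as "not a number", because every tag below LowestTag is the high word of a double. A compilable restatement of the encoding those branches assume (constants mirror JSValue.h of this era; treat it as a sketch, not a reference):

    #include <cstdint>

    enum Tag : uint32_t {
        Int32Tag        = 0xffffffff,
        BooleanTag      = 0xfffffffe,
        NullTag         = 0xfffffffd,
        UndefinedTag    = 0xfffffffc,
        CellTag         = 0xfffffffb,
        EmptyValueTag   = 0xfffffffa,
        DeletedValueTag = 0xfffffff9,
        LowestTag       = DeletedValueTag,
    };

    // op_to_number's fast path: ints pass through; any tag below LowestTag is
    // the high word of a double, so the value is already a number; everything
    // else takes the slow path.
    inline bool isNumberFast(uint32_t tag)
    {
        return tag == Int32Tag || tag < LowestTag;
    }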
-void JIT::emit_op_push_new_scope(Instruction* currentInstruction)
+void JIT::emit_op_push_name_scope(Instruction* currentInstruction)
 {
-    JITStubCall stubCall(this, cti_op_push_new_scope);
-    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
-    stubCall.addArgument(currentInstruction[3].u.operand);
-    stubCall.call(currentInstruction[1].u.operand);
+    int dst = currentInstruction[1].u.operand;
+    emitLoad(currentInstruction[2].u.operand, regT1, regT0);
+    if (currentInstruction[4].u.operand == JSNameScope::CatchScope) {
+        callOperation(operationPushCatchScope, dst, jsCast<SymbolTable*>(getConstantOperand(currentInstruction[3].u.operand)), regT1, regT0);
+        return;
+    }
+
+    RELEASE_ASSERT(currentInstruction[4].u.operand == JSNameScope::FunctionNameScope);
+    callOperation(operationPushFunctionNameScope, dst, jsCast<SymbolTable*>(getConstantOperand(currentInstruction[3].u.operand)), regT1, regT0);
 }

 void JIT::emit_op_catch(Instruction* currentInstruction)
 {
-    // cti_op_throw returns the callFrame for the handler.
-    move(regT0, callFrameRegister);
+    move(TrustedImmPtr(m_vm), regT3);
+    // operationThrow returns the callFrame for the handler.
+    load32(Address(regT3, VM::callFrameForThrowOffset()), callFrameRegister);
+    load32(Address(regT3, VM::vmEntryFrameForThrowOffset()), regT0);
+    store32(regT0, Address(regT3, VM::topVMEntryFrameOffset()));
+
+    addPtr(TrustedImm32(stackPointerOffsetFor(codeBlock()) * sizeof(Register)), callFrameRegister, stackPointerRegister);
+
+    // Now store the exception returned by operationThrow.
+    load32(Address(regT3, VM::exceptionOffset()), regT2);
+    move(TrustedImm32(JSValue::CellTag), regT1);

-    // Now store the exception returned by cti_op_throw.
-    loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(struct JITStackFrame, globalData)), regT3);
-    load32(Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
-    load32(Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
-    store32(TrustedImm32(JSValue().payload()), Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
-    store32(TrustedImm32(JSValue().tag()), Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+    store32(TrustedImm32(0), Address(regT3, VM::exceptionOffset()));

     unsigned exception = currentInstruction[1].u.operand;
-    emitStore(exception, regT1, regT0);
-    map(m_bytecodeOffset + OPCODE_LENGTH(op_catch), exception, regT1, regT0);
-}
+    emitStore(exception, regT1, regT2);

-void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
-{
-    JITStubCall stubCall(this, cti_op_jmp_scopes);
-    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
-    stubCall.call();
-    addJump(jump(), currentInstruction[2].u.operand);
+    load32(Address(regT2, Exception::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
+    load32(Address(regT2, Exception::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
+
+    unsigned thrownValue = currentInstruction[2].u.operand;
+    emitStore(thrownValue, regT1, regT0);
 }
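The rewritten op_catch is easier to follow as plain code: the unwinder parks the handler's call frame and entry frame on the VM, and the opcode re-establishes them, then consumes the pending Exception cell. A pared-down sketch with stand-in types (field names mirror the offsets used above):

    struct Exception; struct VMEntryFrame; struct CallFrame;
    struct VM {
        CallFrame* callFrameForThrow;
        VMEntryFrame* vmEntryFrameForThrow;
        VMEntryFrame* topVMEntryFrame;
        Exception* exception;
    };

    void catchPrologue(VM& vm, CallFrame*& callFrame, Exception*& handlerException)
    {
        callFrame = vm.callFrameForThrow;            // unwinder left the handler frame here
        vm.topVMEntryFrame = vm.vmEntryFrameForThrow;
        handlerException = vm.exception;             // Exception cell, stored to operand 1
        vm.exception = nullptr;                      // clear it so re-entry doesn't rethrow
        // The thrown JSValue (operand 2) is then read from handlerException->value().
    }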

 void JIT::emit_op_switch_imm(Instruction* currentInstruction)
 {
-    unsigned tableIndex = currentInstruction[1].u.operand;
+    size_t tableIndex = currentInstruction[1].u.operand;
     unsigned defaultOffset = currentInstruction[2].u.operand;
     unsigned scrutinee = currentInstruction[3].u.operand;

     // create jump table for switch destinations, track this switch statement.
-    SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
+    SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex);
     m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Immediate));
-    jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+    jumpTable->ensureCTITable();

-    JITStubCall stubCall(this, cti_op_switch_imm);
-    stubCall.addArgument(scrutinee);
-    stubCall.addArgument(Imm32(tableIndex));
-    stubCall.call();
-    jump(regT0);
+    emitLoad(scrutinee, regT1, regT0);
+    callOperation(operationSwitchImmWithUnknownKeyType, regT1, regT0, tableIndex);
+    jump(returnValueGPR);
 }

 void JIT::emit_op_switch_char(Instruction* currentInstruction)
 {
-    unsigned tableIndex = currentInstruction[1].u.operand;
+    size_t tableIndex = currentInstruction[1].u.operand;
     unsigned defaultOffset = currentInstruction[2].u.operand;
     unsigned scrutinee = currentInstruction[3].u.operand;

     // create jump table for switch destinations, track this switch statement.
-    SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
+    SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex);
     m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Character));
-    jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+    jumpTable->ensureCTITable();

-    JITStubCall stubCall(this, cti_op_switch_char);
-    stubCall.addArgument(scrutinee);
-    stubCall.addArgument(Imm32(tableIndex));
-    stubCall.call();
-    jump(regT0);
+    emitLoad(scrutinee, regT1, regT0);
+    callOperation(operationSwitchCharWithUnknownKeyType, regT1, regT0, tableIndex);
+    jump(returnValueGPR);
 }

 void JIT::emit_op_switch_string(Instruction* currentInstruction)
 {
-    unsigned tableIndex = currentInstruction[1].u.operand;
+    size_t tableIndex = currentInstruction[1].u.operand;
     unsigned defaultOffset = currentInstruction[2].u.operand;
     unsigned scrutinee = currentInstruction[3].u.operand;

@@ -1495,346 +887,423 @@ void JIT::emit_op_switch_string(Instruction* currentInstruction)
     StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
     m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset));

-    JITStubCall stubCall(this, cti_op_switch_string);
-    stubCall.addArgument(scrutinee);
-    stubCall.addArgument(Imm32(tableIndex));
-    stubCall.call();
-    jump(regT0);
+    emitLoad(scrutinee, regT1, regT0);
+    callOperation(operationSwitchStringWithUnknownKeyType, regT1, regT0, tableIndex);
+    jump(returnValueGPR);
 }

-void JIT::emit_op_throw_reference_error(Instruction* currentInstruction)
+void JIT::emit_op_throw_static_error(Instruction* currentInstruction)
 {
-    unsigned message = currentInstruction[1].u.operand;
-
-    JITStubCall stubCall(this, cti_op_throw_reference_error);
-    stubCall.addArgument(m_codeBlock->getConstant(message));
-    stubCall.call();
+    emitLoad(m_codeBlock->getConstant(currentInstruction[1].u.operand), regT1, regT0);
+    callOperation(operationThrowStaticError, regT1, regT0, currentInstruction[2].u.operand);
 }

 void JIT::emit_op_debug(Instruction* currentInstruction)
 {
-#if ENABLE(DEBUG_WITH_BREAKPOINT)
-    UNUSED_PARAM(currentInstruction);
-    breakpoint();
-#else
-    JITStubCall stubCall(this, cti_op_debug);
-    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
-    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
-    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
-    stubCall.call();
-#endif
+    load32(codeBlock()->debuggerRequestsAddress(), regT0);
+    Jump noDebuggerRequests = branchTest32(Zero, regT0);
+    callOperation(operationDebug, currentInstruction[1].u.operand);
+    noDebuggerRequests.link(this);
 }
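The three switch opcodes above now share one pattern: record a SwitchRecord so the CTI table can be filled in at link time (which is what ensureCTITable() guarantees exists), then call an operation that resolves an unknown key to a machine-code target and jump to whatever it returns. The table lookup the operation performs amounts to the following sketch (stand-in types; JSC's SimpleJumpTable pairs bytecode branch offsets with lazily materialized CTI targets):

    #include <cstdint>
    #include <vector>

    struct SimpleJumpTableStub {
        int32_t min;                    // lowest case value in the table
        std::vector<void*> ctiOffsets;  // per-case machine targets (null = default)
        void* ctiDefault;               // target when no case matches

        void* target(int32_t value) const
        {
            size_t index = static_cast<size_t>(value - min);
            if (index < ctiOffsets.size() && ctiOffsets[index])
                return ctiOffsets[index];
            return ctiDefault;          // also covers values below min (wraps above size)
        }
    };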

-void JIT::emit_op_enter(Instruction*)
+void JIT::emit_op_enter(Instruction* currentInstruction)
 {
+    emitEnterOptimizationCheck();
+
     // Even though JIT code doesn't use them, we initialize our constant
     // registers to zap stale pointers, to avoid unnecessarily prolonging
     // object lifetime and increasing GC pressure.
     for (int i = 0; i < m_codeBlock->m_numVars; ++i)
-        emitStore(i, jsUndefined());
-}
-
-void JIT::emit_op_create_activation(Instruction* currentInstruction)
-{
-    unsigned activation = currentInstruction[1].u.operand;
-
-    Jump activationCreated = branch32(NotEqual, tagFor(activation), TrustedImm32(JSValue::EmptyValueTag));
-    JITStubCall(this, cti_op_push_activation).call(activation);
-    activationCreated.link(this);
-}
-
-void JIT::emit_op_create_arguments(Instruction* currentInstruction)
-{
-    unsigned dst = currentInstruction[1].u.operand;
+        emitStore(virtualRegisterForLocal(i).offset(), jsUndefined());

-    Jump argsCreated = branch32(NotEqual, tagFor(dst), TrustedImm32(JSValue::EmptyValueTag));
-
-    if (m_codeBlock->m_numParameters == 1)
-        JITStubCall(this, cti_op_create_arguments_no_params).call();
-    else
-        JITStubCall(this, cti_op_create_arguments).call();
-
-    emitStore(dst, regT1, regT0);
-    emitStore(unmodifiedArgumentsRegister(dst), regT1, regT0);
-
-    argsCreated.link(this);
+    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_enter);
+    slowPathCall.call();
 }

-void JIT::emit_op_init_lazy_reg(Instruction* currentInstruction)
+void JIT::emit_op_create_lexical_environment(Instruction* currentInstruction)
 {
-    unsigned dst = currentInstruction[1].u.operand;
+    int lexicalEnvironment = currentInstruction[1].u.operand;
+    int scope = currentInstruction[2].u.operand;

-    emitStore(dst, JSValue());
+    emitLoadPayload(currentInstruction[2].u.operand, regT0);
+    callOperation(operationCreateActivation, regT0);
+    emitStoreCell(lexicalEnvironment, returnValueGPR);
+    emitStoreCell(scope, returnValueGPR);
 }

-void JIT::emit_op_get_callee(Instruction* currentInstruction)
+void JIT::emit_op_get_scope(Instruction* currentInstruction)
 {
     int dst = currentInstruction[1].u.operand;
-    emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT0);
+    emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT0);
+    loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT0);
     emitStoreCell(dst, regT0);
 }

 void JIT::emit_op_create_this(Instruction* currentInstruction)
 {
-    unsigned protoRegister = currentInstruction[2].u.operand;
-    emitLoad(protoRegister, regT1, regT0);
-    JITStubCall stubCall(this, cti_op_create_this);
-    stubCall.addArgument(regT1, regT0);
-    stubCall.call(currentInstruction[1].u.operand);
-}
+    int callee = currentInstruction[2].u.operand;
+    WriteBarrierBase<JSCell>* cachedFunction = &currentInstruction[4].u.jsCell;
+    RegisterID calleeReg = regT0;
+    RegisterID rareDataReg = regT4;
+    RegisterID resultReg = regT0;
+    RegisterID allocatorReg = regT1;
+    RegisterID structureReg = regT2;
+    RegisterID cachedFunctionReg = regT4;
+    RegisterID scratchReg = regT3;

-void JIT::emit_op_convert_this(Instruction* currentInstruction)
-{
-    unsigned thisRegister = currentInstruction[1].u.operand;
+    emitLoadPayload(callee, calleeReg);
+    loadPtr(Address(calleeReg, JSFunction::offsetOfRareData()), rareDataReg);
+    addSlowCase(branchTestPtr(Zero, rareDataReg));
+    loadPtr(Address(rareDataReg, FunctionRareData::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg);
+    loadPtr(Address(rareDataReg, FunctionRareData::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg);
+    addSlowCase(branchTestPtr(Zero, allocatorReg));

-    emitLoad(thisRegister, regT1, regT0);
+    loadPtr(cachedFunction, cachedFunctionReg);
+    Jump hasSeenMultipleCallees = branchPtr(Equal, cachedFunctionReg, TrustedImmPtr(JSCell::seenMultipleCalleeObjects()));
+    addSlowCase(branchPtr(NotEqual, calleeReg, cachedFunctionReg));
+    hasSeenMultipleCallees.link(this);

-    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
+    emitAllocateJSObject(allocatorReg, structureReg, resultReg, scratchReg);
+    emitStoreCell(currentInstruction[1].u.operand, resultReg);
+}

-    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
-    addSlowCase(branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(NeedsThisConversion)));
+void JIT::emitSlow_op_create_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    linkSlowCase(iter); // doesn't have rare data
+    linkSlowCase(iter); // doesn't have an allocation profile
+    linkSlowCase(iter); // allocation failed
+    linkSlowCase(iter); // cached function didn't match

-    map(m_bytecodeOffset + OPCODE_LENGTH(op_convert_this), thisRegister, regT1, regT0);
+    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_this);
+    slowPathCall.call();
 }
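The inline path of emit_op_create_this is gated on a cached-callee check whose policy is simple to restate: FunctionRareData's allocation profile is trusted only while the cache still identifies this callee, and a sentinel value means the site has gone polymorphic, in which case the profile is used without the identity check. A sketch (the sentinel is illustrative; JSC uses JSCell::seenMultipleCalleeObjects()):

    struct JSCellStub;
    static JSCellStub* const seenMultipleCalleeObjects
        = reinterpret_cast<JSCellStub*>(1); // illustrative sentinel value

    bool canUseCachedAllocation(JSCellStub* cachedCallee, JSCellStub* actualCallee)
    {
        if (cachedCallee == seenMultipleCalleeObjects)
            return true;                      // polymorphic site: profile is callee-agnostic
        return cachedCallee == actualCallee;  // monomorphic site: must match exactly
    }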

-void JIT::emit_op_convert_this_strict(Instruction* currentInstruction)
+void JIT::emit_op_to_this(Instruction* currentInstruction)
 {
-    unsigned thisRegister = currentInstruction[1].u.operand;
-
-    emitLoad(thisRegister, regT1, regT0);
-
-    Jump notNull = branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag));
-    emitStore(thisRegister, jsNull());
-    Jump setThis = jump();
-    notNull.link(this);
-    Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
-    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
-    Jump notAnObject = branch8(NotEqual, Address(regT2, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType));
-    addSlowCase(branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(NeedsThisConversion)));
-    isImmediate.link(this);
-    notAnObject.link(this);
-    setThis.link(this);
-    map(m_bytecodeOffset + OPCODE_LENGTH(op_convert_this_strict), thisRegister, regT1, regT0);
+    WriteBarrierBase<Structure>* cachedStructure = &currentInstruction[2].u.structure;
+    int thisRegister = currentInstruction[1].u.operand;
+
+    emitLoad(thisRegister, regT3, regT2);
+
+    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::CellTag)));
+    addSlowCase(branch8(NotEqual, Address(regT2, JSCell::typeInfoTypeOffset()), TrustedImm32(FinalObjectType)));
+    loadPtr(Address(regT2, JSCell::structureIDOffset()), regT0);
+    loadPtr(cachedStructure, regT2);
+    addSlowCase(branchPtr(NotEqual, regT0, regT2));
 }

-void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_to_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    unsigned thisRegister = currentInstruction[1].u.operand;
-
     linkSlowCase(iter);
     linkSlowCase(iter);
+    linkSlowCase(iter);
+    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_this);
+    slowPathCall.call();
+}

-    JITStubCall stubCall(this, cti_op_convert_this);
-    stubCall.addArgument(regT1, regT0);
-    stubCall.call(thisRegister);
+void JIT::emit_op_check_tdz(Instruction* currentInstruction)
+{
+    emitLoadTag(currentInstruction[1].u.operand, regT0);
+    addSlowCase(branch32(Equal, regT0, TrustedImm32(JSValue::EmptyValueTag)));
 }

-void JIT::emitSlow_op_convert_this_strict(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_check_tdz(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    unsigned thisRegister = currentInstruction[1].u.operand;
-
     linkSlowCase(iter);
-
-    JITStubCall stubCall(this, cti_op_convert_this_strict);
-    stubCall.addArgument(regT1, regT0);
-    stubCall.call(thisRegister);
+    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_throw_tdz_error);
+    slowPathCall.call();
 }

 void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
 {
-    peek(regT2, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof(void*));
-    Jump noProfiler = branchTestPtr(Zero, Address(regT2));
-
-    JITStubCall stubCall(this, cti_op_profile_will_call);
-    stubCall.addArgument(currentInstruction[1].u.operand);
-    stubCall.call();
-    noProfiler.link(this);
+    load32(m_vm->enabledProfilerAddress(), regT0);
+    Jump profilerDone = branchTestPtr(Zero, regT0);
+    emitLoad(currentInstruction[1].u.operand, regT1, regT0);
+    callOperation(operationProfileWillCall, regT1, regT0);
+    profilerDone.link(this);
 }

 void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
 {
-    peek(regT2, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof(void*));
-    Jump noProfiler = branchTestPtr(Zero, Address(regT2));
-
-    JITStubCall stubCall(this, cti_op_profile_did_call);
-    stubCall.addArgument(currentInstruction[1].u.operand);
-    stubCall.call();
-    noProfiler.link(this);
+    load32(m_vm->enabledProfilerAddress(), regT0);
+    Jump profilerDone = branchTestPtr(Zero, regT0);
+    emitLoad(currentInstruction[1].u.operand, regT1, regT0);
+    callOperation(operationProfileDidCall, regT1, regT0);
+    profilerDone.link(this);
 }

-void JIT::emit_op_get_arguments_length(Instruction* currentInstruction)
+void JIT::emit_op_has_structure_property(Instruction* currentInstruction)
 {
     int dst = currentInstruction[1].u.operand;
-    int argumentsRegister = currentInstruction[2].u.operand;
-    addSlowCase(branch32(NotEqual, tagFor(argumentsRegister), TrustedImm32(JSValue::EmptyValueTag)));
-    emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
-    sub32(TrustedImm32(1), regT0);
-    emitStoreInt32(dst, regT0);
+    int base = currentInstruction[2].u.operand;
+    int enumerator = currentInstruction[4].u.operand;
+
+    emitLoadPayload(base, regT0);
+    emitJumpSlowCaseIfNotJSCell(base);
+
+    emitLoadPayload(enumerator, regT1);
+
+    load32(Address(regT0, JSCell::structureIDOffset()), regT0);
+    addSlowCase(branch32(NotEqual, regT0, Address(regT1, JSPropertyNameEnumerator::cachedStructureIDOffset())));
+
+    move(TrustedImm32(1), regT0);
+    emitStoreBool(dst, regT0);
 }
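Why a single 32-bit compare suffices in emit_op_has_structure_property: the enumerator snapshots the base's StructureID when the for-in begins, and structure-owned properties cannot disappear without the object transitioning to a new structure. Pseudo-C++ with stand-in types:

    #include <cstdint>
    typedef uint32_t StructureID;

    struct PropertyNameEnumeratorStub { StructureID cachedStructureID; };
    struct ObjectStub { StructureID structureID; };

    // Same structure as when the names were enumerated => property still present.
    bool hasStructurePropertyFast(const ObjectStub& base, const PropertyNameEnumeratorStub& e)
    {
        return base.structureID == e.cachedStructureID;
    }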

-void JIT::emitSlow_op_get_arguments_length(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::privateCompileHasIndexedProperty(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
 {
-    linkSlowCase(iter);
-    int dst = currentInstruction[1].u.operand;
-    int base = currentInstruction[2].u.operand;
-    int ident = currentInstruction[3].u.operand;
+    Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;
+
+    PatchableJump badType;
+
+    // FIXME: Add support for other types like TypedArrays and Arguments.
+    // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034.
+    JumpList slowCases = emitLoadForArrayMode(currentInstruction, arrayMode, badType);
+    move(TrustedImm32(1), regT0);
+    Jump done = jump();
+
+    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
+
+    patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
+    patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));

-    JITStubCall stubCall(this, cti_op_get_by_id_generic);
-    stubCall.addArgument(base);
-    stubCall.addArgument(TrustedImmPtr(&(m_codeBlock->identifier(ident))));
-    stubCall.call(dst);
+    patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
+
+    byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
+        m_codeBlock, patchBuffer,
+        ("Baseline has_indexed_property stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
+
+    RepatchBuffer repatchBuffer(m_codeBlock);
+    repatchBuffer.relink(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
+    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(operationHasIndexedPropertyGeneric));
 }
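The stub compiled above guards on the base's indexing shape before touching its storage; emit_op_has_indexed_property (below) computes that shape byte with emitArrayProfilingSiteWithCell and masks it with IndexingShapeMask. The test the generated code performs reduces to a masked compare, roughly as follows (constants here are illustrative; the real values live in IndexingType.h):

    #include <cstdint>

    constexpr uint8_t IndexingShapeMask = 0x0e; // illustrative
    constexpr uint8_t Int32Shape        = 0x04; // illustrative
    constexpr uint8_t DoubleShape       = 0x06; // illustrative
    constexpr uint8_t ContiguousShape   = 0x08; // illustrative

    // The indexing-type byte lives in the cell header; the shape bits select
    // which storage layout (int32/double/contiguous/...) the butterfly uses.
    bool shapeMatches(uint8_t indexingTypeByte, uint8_t expectedShape)
    {
        return (indexingTypeByte & IndexingShapeMask) == expectedShape;
    }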

-void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
+void JIT::emit_op_has_indexed_property(Instruction* currentInstruction)
 {
     int dst = currentInstruction[1].u.operand;
-    int argumentsRegister = currentInstruction[2].u.operand;
+    int base = currentInstruction[2].u.operand;
     int property = currentInstruction[3].u.operand;
-    addSlowCase(branch32(NotEqual, tagFor(argumentsRegister), TrustedImm32(JSValue::EmptyValueTag)));
-    emitLoad(property, regT1, regT2);
-    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
-    add32(TrustedImm32(1), regT2);
-    // regT2 now contains the integer index of the argument we want, including this
-    emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT3);
-    addSlowCase(branch32(AboveOrEqual, regT2, regT3));
+    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

-    Jump skipOutofLineParams;
-    int numArgs = m_codeBlock->m_numParameters;
-    if (numArgs) {
-        Jump notInInPlaceArgs = branch32(AboveOrEqual, regT2, Imm32(numArgs));
-        addPtr(Imm32(static_cast<int32_t>(-(RegisterFile::CallFrameHeaderSize + numArgs) * sizeof(Register))), callFrameRegister, regT1);
-        loadPtr(BaseIndex(regT1, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
-        loadPtr(BaseIndex(regT1, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
-        skipOutofLineParams = jump();
-        notInInPlaceArgs.link(this);
-    }
+    emitLoadPayload(base, regT0);
+    emitJumpSlowCaseIfNotJSCell(base);

-    addPtr(Imm32(static_cast<int32_t>(-(RegisterFile::CallFrameHeaderSize + numArgs) * sizeof(Register))), callFrameRegister, regT1);
-    mul32(TrustedImm32(sizeof(Register)), regT3, regT3);
-    subPtr(regT3, regT1);
-    loadPtr(BaseIndex(regT1, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
-    loadPtr(BaseIndex(regT1, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
-    if (numArgs)
-        skipOutofLineParams.link(this);
-    emitStore(dst, regT1, regT0);
+    emitLoadPayload(property, regT1);
+
+    // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
+    // We check the value as if it was a uint32 against the m_vectorLength - which will always fail if
+    // the number was signed since m_vectorLength is always less than intmax (since the total allocation
+    // size is always less than 4Gb). As such zero extending will have been correct (and extending the value
+    // to 64-bits is necessary since it's used in the address calculation). We zero extend rather than sign
+    // extending since it makes it easier to re-tag the value in the slow case.
+    zeroExtend32ToPtr(regT1, regT1);
+
+    emitArrayProfilingSiteWithCell(regT0, regT2, profile);
+    and32(TrustedImm32(IndexingShapeMask), regT2);
+
+    JITArrayMode mode = chooseArrayMode(profile);
+    PatchableJump badType;
+
+    // FIXME: Add support for other types like TypedArrays and Arguments.
+    // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034.
+    JumpList slowCases = emitLoadForArrayMode(currentInstruction, mode, badType);
+    move(TrustedImm32(1), regT0);
+
+    addSlowCase(badType);
+    addSlowCase(slowCases);
+
+    Label done = label();
+
+    emitStoreBool(dst, regT0);
+
+    m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
 }

-void JIT::emitSlow_op_get_argument_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_has_indexed_property(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    unsigned dst = currentInstruction[1].u.operand;
-    unsigned arguments = currentInstruction[2].u.operand;
-    unsigned property = currentInstruction[3].u.operand;
+    int dst = currentInstruction[1].u.operand;
+    int base = currentInstruction[2].u.operand;
+    int property = currentInstruction[3].u.operand;
+    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+
+    linkSlowCaseIfNotJSCell(iter, base); // base cell check
+    linkSlowCase(iter); // base array check
+    linkSlowCase(iter); // vector length check
+    linkSlowCase(iter); // empty value

-    linkSlowCase(iter);
-    Jump skipArgumentsCreation = jump();
+    Label slowPath = label();
+
+    emitLoad(base, regT1, regT0);
+    emitLoad(property, regT3, regT2);
+    Call call = callOperation(operationHasIndexedPropertyDefault, dst, regT1, regT0, regT3, regT2, profile);

-    linkSlowCase(iter);
-    linkSlowCase(iter);
-    if (m_codeBlock->m_numParameters == 1)
-        JITStubCall(this, cti_op_create_arguments_no_params).call();
-    else
-        JITStubCall(this, cti_op_create_arguments).call();
+    m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
+    m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
+    m_byValInstructionIndex++;
+}
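The long comment above describes a standard trick worth restating in isolation: comparing the index as unsigned, against a vector length known to fit in the signed range, folds the negative-index test and the out-of-bounds test into one branch, because any negative int32 reinterpreted as uint32 lands above INT32_MAX:

    #include <cstdint>

    // One unsigned compare covers both "index < 0" and "index >= length",
    // provided vectorLength <= INT32_MAX (which JSC's allocation limits ensure).
    inline bool inBounds(int32_t index, uint32_t vectorLength)
    {
        return static_cast<uint32_t>(index) < vectorLength;
    }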

+void JIT::emit_op_get_direct_pname(Instruction* currentInstruction)
+{
+    int dst = currentInstruction[1].u.operand;
+    int base = currentInstruction[2].u.operand;
+    int index = currentInstruction[4].u.operand;
+    int enumerator = currentInstruction[5].u.operand;
+
+    // Check that base is a cell
+    emitLoadPayload(base, regT0);
+    emitJumpSlowCaseIfNotJSCell(base);
+
+    // Check the structure
+    emitLoadPayload(enumerator, regT1);
+    load32(Address(regT0, JSCell::structureIDOffset()), regT2);
+    addSlowCase(branch32(NotEqual, regT2, Address(regT1, JSPropertyNameEnumerator::cachedStructureIDOffset())));
+
+    // Compute the offset
+    emitLoadPayload(index, regT2);
+    // If index is less than the enumerator's cached inline storage, then it's an inline access
+    Jump outOfLineAccess = branch32(AboveOrEqual, regT2, Address(regT1, JSPropertyNameEnumerator::cachedInlineCapacityOffset()));
+    addPtr(TrustedImm32(JSObject::offsetOfInlineStorage()), regT0);
+    load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
+    load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
-
-    emitStore(arguments, regT1, regT0);
-    emitStore(unmodifiedArgumentsRegister(arguments), regT1, regT0);
+    Jump done = jump();
+
+    // Otherwise it's out of line
+    outOfLineAccess.link(this);
+    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
+    sub32(Address(regT1, JSPropertyNameEnumerator::cachedInlineCapacityOffset()), regT2);
+    neg32(regT2);
+    int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue);
+    load32(BaseIndex(regT0, regT2, TimesEight, offsetOfFirstProperty + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
+    load32(BaseIndex(regT0, regT2, TimesEight, offsetOfFirstProperty + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
-
-    skipArgumentsCreation.link(this);
-    JITStubCall stubCall(this, cti_op_get_by_val);
-    stubCall.addArgument(arguments);
-    stubCall.addArgument(property);
-    stubCall.call(dst);
+    done.link(this);
+    emitValueProfilingSite();
+    emitStore(dst, regT1, regT0);
 }

-#if ENABLE(JIT_USE_SOFT_MODULO)
-void JIT::softModulo()
+void JIT::emitSlow_op_get_direct_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    push(regT1);
-    push(regT3);
-    move(regT2, regT3);
-    move(regT0, regT2);
-    move(TrustedImm32(0), regT1);
+    int base = currentInstruction[2].u.operand;
+    linkSlowCaseIfNotJSCell(iter, base);
+    linkSlowCase(iter);

-    // Check for negative result remainder
-    Jump positiveRegT3 = branch32(GreaterThanOrEqual, regT3, TrustedImm32(0));
-    neg32(regT3);
-    xor32(TrustedImm32(1), regT1);
-    positiveRegT3.link(this);
+    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_direct_pname);
+    slowPathCall.call();
 }
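emit_op_get_direct_pname above walks the two property regions of a JSObject: slots below the cached inline capacity live directly in the object, and later slots live out of line in the butterfly, addressed at negative indices biased by firstOutOfLineOffset. As a sketch (the -1 bias here is illustrative; JSC derives the real one from PropertyOffset.h):

    #include <cstdint>

    struct JSValueBits { uint32_t payload; uint32_t tag; };

    JSValueBits loadDirect(const JSValueBits* inlineStorage, const JSValueBits* butterfly,
                           uint32_t index, uint32_t inlineCapacity)
    {
        if (index < inlineCapacity)
            return inlineStorage[index];                 // inline access
        // Out-of-line slots grow away from the butterfly at negative indices.
        uint32_t outOfLineIndex = index - inlineCapacity;
        return butterfly[-1 - static_cast<int32_t>(outOfLineIndex)]; // bias illustrative
    }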

-    Jump positiveRegT2 = branch32(GreaterThanOrEqual, regT2, TrustedImm32(0));
-    neg32(regT2);
-    xor32(TrustedImm32(2), regT1);
-    positiveRegT2.link(this);
+void JIT::emit_op_enumerator_structure_pname(Instruction* currentInstruction)
+{
+    int dst = currentInstruction[1].u.operand;
+    int enumerator = currentInstruction[2].u.operand;
+    int index = currentInstruction[3].u.operand;

-    // Save the condition for negative remainder
-    push(regT1);
+    emitLoadPayload(index, regT0);
+    emitLoadPayload(enumerator, regT1);
+    Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endStructurePropertyIndexOffset()));

-    Jump exitBranch = branch32(LessThan, regT2, regT3);
+    move(TrustedImm32(JSValue::NullTag), regT2);
+    move(TrustedImm32(0), regT0);

-    // Power of two fast case
-    move(regT3, regT0);
-    sub32(TrustedImm32(1), regT0);
-    Jump powerOfTwo = branchTest32(NonZero, regT0, regT3);
-    and32(regT0, regT2);
-    powerOfTwo.link(this);
+    Jump done = jump();
+    inBounds.link(this);

-    and32(regT3, regT0);
+    loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1);
+    loadPtr(BaseIndex(regT1, regT0, timesPtr()), regT0);
+    move(TrustedImm32(JSValue::CellTag), regT2);

-    Jump exitBranch2 = branchTest32(Zero, regT0);
+    done.link(this);
+    emitStore(dst, regT2, regT0);
+}

-    countLeadingZeros32(regT2, regT0);
-    countLeadingZeros32(regT3, regT1);
-    sub32(regT0, regT1);
+void JIT::emit_op_enumerator_generic_pname(Instruction* currentInstruction)
+{
+    int dst = currentInstruction[1].u.operand;
+    int enumerator = currentInstruction[2].u.operand;
+    int index = currentInstruction[3].u.operand;

-    Jump useFullTable = branch32(Equal, regT1, TrustedImm32(31));
+    emitLoadPayload(index, regT0);
+    emitLoadPayload(enumerator, regT1);
+    Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endGenericPropertyIndexOffset()));

-    neg32(regT1);
-    add32(TrustedImm32(31), regT1);
+    move(TrustedImm32(JSValue::NullTag), regT2);
+    move(TrustedImm32(0), regT0);

-    int elementSizeByShift = -1;
-#if CPU(ARM)
-    elementSizeByShift = 3;
-#else
-#error "JIT_OPTIMIZE_MOD not yet supported on this platform."
-#endif
-    relativeTableJump(regT1, elementSizeByShift);
-
-    useFullTable.link(this);
-    // Modulo table
-    for (int i = 31; i > 0; --i) {
-#if CPU(ARM_TRADITIONAL)
-        m_assembler.cmp_r(regT2, m_assembler.lsl(regT3, i));
-        m_assembler.sub_r(regT2, regT2, m_assembler.lsl(regT3, i), ARMAssembler::CS);
-#elif CPU(ARM_THUMB2)
-        ShiftTypeAndAmount shift(SRType_LSL, i);
-        m_assembler.sub_S(regT1, regT2, regT3, shift);
-        m_assembler.it(ARMv7Assembler::ConditionCS);
-        m_assembler.mov(regT2, regT1);
-#else
-#error "JIT_OPTIMIZE_MOD not yet supported on this platform."
-#endif
+    Jump done = jump();
+    inBounds.link(this);
+
+    loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1);
+    loadPtr(BaseIndex(regT1, regT0, timesPtr()), regT0);
+    move(TrustedImm32(JSValue::CellTag), regT2);
+
+    done.link(this);
+    emitStore(dst, regT2, regT0);
+}
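The two enumerator pname opcodes above differ only in which end-index they bound against (structure properties vs. generic properties); past the bound they produce jsNull(), otherwise the cached JSString for that index. Equivalent logic as a sketch with stand-in types:

    #include <cstdint>
    #include <string>
    #include <vector>

    struct EnumeratorStub {
        std::vector<std::string> cachedPropertyNames; // JSStrings in the real thing
        uint32_t endStructurePropertyIndex;           // structure properties come first
    };

    // Returns null once the range is exhausted, mirroring the (NullTag, 0) store above.
    const std::string* structurePname(const EnumeratorStub& e, uint32_t index)
    {
        if (index >= e.endStructurePropertyIndex)
            return nullptr;
        return &e.cachedPropertyNames[index];
    }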
+
+void JIT::emit_op_profile_type(Instruction* currentInstruction)
+{
+    TypeLocation* cachedTypeLocation = currentInstruction[2].u.location;
+    int valueToProfile = currentInstruction[1].u.operand;
+
+    // Load payload in T0. Load tag in T3.
+    emitLoadPayload(valueToProfile, regT0);
+    emitLoadTag(valueToProfile, regT3);
+
+    JumpList jumpToEnd;
+
+    // Compile in a predictive type check, if possible, to see if we can skip writing to the log.
+    // These typechecks are inlined to match those of the 32-bit JSValue type checks.
+    if (cachedTypeLocation->m_lastSeenType == TypeUndefined)
+        jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::UndefinedTag)));
+    else if (cachedTypeLocation->m_lastSeenType == TypeNull)
+        jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::NullTag)));
+    else if (cachedTypeLocation->m_lastSeenType == TypeBoolean)
+        jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::BooleanTag)));
+    else if (cachedTypeLocation->m_lastSeenType == TypeMachineInt)
+        jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::Int32Tag)));
+    else if (cachedTypeLocation->m_lastSeenType == TypeNumber) {
+        jumpToEnd.append(branch32(Below, regT3, TrustedImm32(JSValue::LowestTag)));
+        jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::Int32Tag)));
+    } else if (cachedTypeLocation->m_lastSeenType == TypeString) {
+        Jump isNotCell = branch32(NotEqual, regT3, TrustedImm32(JSValue::CellTag));
+        jumpToEnd.append(branch8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType)));
+        isNotCell.link(this);
     }

-    Jump lower = branch32(Below, regT2, regT3);
-    sub32(regT3, regT2);
-    lower.link(this);
+    // Load the type profiling log into T2.
+    TypeProfilerLog* cachedTypeProfilerLog = m_vm->typeProfilerLog();
+    move(TrustedImmPtr(cachedTypeProfilerLog), regT2);

-    exitBranch.link(this);
-    exitBranch2.link(this);
+    // Load the next log entry into T1.
+    loadPtr(Address(regT2, TypeProfilerLog::currentLogEntryOffset()), regT1);

-    // Check for negative remainder
-    pop(regT1);
-    Jump positiveResult = branch32(Equal, regT1, TrustedImm32(0));
-    neg32(regT2);
-    positiveResult.link(this);
+    // Store the JSValue onto the log entry.
+    store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+    store32(regT3, Address(regT1, TypeProfilerLog::LogEntry::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));

-    move(regT2, regT0);
+    // Store the structureID of the cell if argument is a cell, otherwise, store 0 on the log entry.
+    Jump notCell = branch32(NotEqual, regT3, TrustedImm32(JSValue::CellTag));
+    load32(Address(regT0, JSCell::structureIDOffset()), regT0);
+    store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset()));
+    Jump skipNotCell = jump();
+    notCell.link(this);
+    store32(TrustedImm32(0), Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset()));
+    skipNotCell.link(this);

-    pop(regT3);
-    pop(regT1);
-    ret();
+    // Store the typeLocation on the log entry.
+    move(TrustedImmPtr(cachedTypeLocation), regT0);
+    store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::locationOffset()));
+
+    // Increment the current log entry.
+    addPtr(TrustedImm32(sizeof(TypeProfilerLog::LogEntry)), regT1);
+    store32(regT1, Address(regT2, TypeProfilerLog::currentLogEntryOffset()));
+    jumpToEnd.append(branchPtr(NotEqual, regT1, TrustedImmPtr(cachedTypeProfilerLog->logEndPtr())));
+    // Clear the log if we're at the end of the log.
+    callOperation(operationProcessTypeProfilerLog);
+
+    jumpToEnd.link(this);
 }

-#endif // ENABLE(JIT_USE_SOFT_MODULO)

 } // namespace JSC
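The emit_op_profile_type fast path above is a classic ring-buffer append: write the value, its StructureID (or 0 for non-cells), and the TypeLocation into the current entry, bump the cursor, and only call out to operationProcessTypeProfilerLog when the cursor reaches the end. In outline, with stand-in types (the real layout is TypeProfilerLog::LogEntry):

    #include <cstdint>

    struct LogEntry { uint64_t value; uint32_t structureID; void* location; };

    struct TypeProfilerLogStub {
        LogEntry* currentLogEntry;      // cursor into a fixed-size buffer
        LogEntry* logEnd;               // one past the last entry
        LogEntry* (*processFullLog)();  // drains the log, returns the reset cursor

        void record(uint64_t value, uint32_t structureID, void* location)
        {
            *currentLogEntry = { value, structureID, location };
            if (++currentLogEntry == logEnd)
                currentLogEntry = processFullLog(); // slow path, as above
        }
    };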