X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/blobdiff_plain/ba379fdc102753d6be2c4d937058fe40257329fe..4be4e30906bcb8ee30b4d189205cb70bad6707ce:/jit/JITOpcodes.cpp diff --git a/jit/JITOpcodes.cpp b/jit/JITOpcodes.cpp index 85997c2..2a88f50 100644 --- a/jit/JITOpcodes.cpp +++ b/jit/JITOpcodes.cpp @@ -1,5 +1,6 @@ /* - * Copyright (C) 2009 Apple Inc. All rights reserved. + * Copyright (C) 2009, 2012 Apple Inc. All rights reserved. + * Copyright (C) 2010 Patrick Gansterer * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -24,672 +25,321 @@ */ #include "config.h" -#include "JIT.h" - #if ENABLE(JIT) +#include "JIT.h" -#include "JITInlineMethods.h" +#include "Arguments.h" +#include "CopiedSpaceInlines.h" +#include "Heap.h" +#include "JITInlines.h" #include "JITStubCall.h" #include "JSArray.h" #include "JSCell.h" #include "JSFunction.h" +#include "JSPropertyNameIterator.h" #include "LinkBuffer.h" namespace JSC { -#if USE(JSVALUE32_64) +#if USE(JSVALUE64) -void JIT::privateCompileCTIMachineTrampolines(RefPtr* executablePool, JSGlobalData* globalData, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallPreLink, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk) +JIT::CodeRef JIT::privateCompileCTINativeCall(VM* vm, NativeFunction) { -#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) - // (1) This function provides fast property access for string length - Label stringLengthBegin = align(); - - // regT0 holds payload, regT1 holds tag - - Jump string_failureCases1 = branch32(NotEqual, regT1, Imm32(JSValue::CellTag)); - Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)); - - // Checks out okay! - get the length from the Ustring. - loadPtr(Address(regT0, OBJECT_OFFSETOF(JSString, m_value) + OBJECT_OFFSETOF(UString, m_rep)), regT2); - load32(Address(regT2, OBJECT_OFFSETOF(UString::Rep, len)), regT2); - - Jump string_failureCases3 = branch32(Above, regT2, Imm32(INT_MAX)); - move(regT2, regT0); - move(Imm32(JSValue::Int32Tag), regT1); - - ret(); -#endif - - // (2) Trampolines for the slow cases of op_call / op_call_eval / op_construct. - -#if ENABLE(JIT_OPTIMIZE_CALL) - /* VirtualCallPreLink Trampoline */ - Label virtualCallPreLinkBegin = align(); - - // regT0 holds callee, regT1 holds argCount. - loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_body)), regT2); - loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT2); - Jump hasCodeBlock1 = branchTestPtr(NonZero, regT2); - - // Lazily generate a CodeBlock. - preserveReturnAddressAfterCall(regT3); // return address - restoreArgumentReference(); - Call callJSFunction1 = call(); - move(regT0, regT2); - emitGetJITStubArg(1, regT0); // callee - emitGetJITStubArg(5, regT1); // argCount - restoreReturnAddressBeforeReturn(regT3); // return address - hasCodeBlock1.link(this); - - // regT2 holds codeBlock. - Jump isNativeFunc1 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode)); - - // Check argCount matches callee arity. 
- Jump arityCheckOkay1 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1); - preserveReturnAddressAfterCall(regT3); - emitPutJITStubArg(regT3, 3); // return address - emitPutJITStubArg(regT2, 7); // codeBlock - restoreArgumentReference(); - Call callArityCheck1 = call(); - move(regT1, callFrameRegister); - emitGetJITStubArg(1, regT0); // callee - emitGetJITStubArg(5, regT1); // argCount - restoreReturnAddressBeforeReturn(regT3); // return address - - arityCheckOkay1.link(this); - isNativeFunc1.link(this); - - compileOpCallInitializeCallFrame(); - - preserveReturnAddressAfterCall(regT3); - emitPutJITStubArg(regT3, 3); - restoreArgumentReference(); - Call callDontLazyLinkCall = call(); - restoreReturnAddressBeforeReturn(regT3); - jump(regT0); - - /* VirtualCallLink Trampoline */ - Label virtualCallLinkBegin = align(); - - // regT0 holds callee, regT1 holds argCount. - loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_body)), regT2); - loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT2); - Jump hasCodeBlock2 = branchTestPtr(NonZero, regT2); - - // Lazily generate a CodeBlock. - preserveReturnAddressAfterCall(regT3); // return address - restoreArgumentReference(); - Call callJSFunction2 = call(); - move(regT0, regT2); - emitGetJITStubArg(1, regT0); // callee - emitGetJITStubArg(5, regT1); // argCount - restoreReturnAddressBeforeReturn(regT3); // return address - hasCodeBlock2.link(this); - - // regT2 holds codeBlock. - Jump isNativeFunc2 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode)); - - // Check argCount matches callee arity. - Jump arityCheckOkay2 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1); - preserveReturnAddressAfterCall(regT3); - emitPutJITStubArg(regT3, 3); // return address - emitPutJITStubArg(regT2, 7); // codeBlock - restoreArgumentReference(); - Call callArityCheck2 = call(); - move(regT1, callFrameRegister); - emitGetJITStubArg(1, regT0); // callee - emitGetJITStubArg(5, regT1); // argCount - restoreReturnAddressBeforeReturn(regT3); // return address - - arityCheckOkay2.link(this); - isNativeFunc2.link(this); - - compileOpCallInitializeCallFrame(); - - preserveReturnAddressAfterCall(regT3); - emitPutJITStubArg(regT3, 3); - restoreArgumentReference(); - Call callLazyLinkCall = call(); - restoreReturnAddressBeforeReturn(regT3); - jump(regT0); -#endif // ENABLE(JIT_OPTIMIZE_CALL) - - /* VirtualCall Trampoline */ - Label virtualCallBegin = align(); - - // regT0 holds callee, regT1 holds argCount. - loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_body)), regT2); - loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT2); - Jump hasCodeBlock3 = branchTestPtr(NonZero, regT2); - - // Lazily generate a CodeBlock. - preserveReturnAddressAfterCall(regT3); // return address - restoreArgumentReference(); - Call callJSFunction3 = call(); - move(regT0, regT2); - emitGetJITStubArg(1, regT0); // callee - emitGetJITStubArg(5, regT1); // argCount - restoreReturnAddressBeforeReturn(regT3); // return address - hasCodeBlock3.link(this); - - // regT2 holds codeBlock. - Jump isNativeFunc3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode)); - - // Check argCount matches callee. 
-    Jump arityCheckOkay3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
-    preserveReturnAddressAfterCall(regT3);
-    emitPutJITStubArg(regT3, 3); // return address
-    emitPutJITStubArg(regT2, 7); // codeBlock
-    restoreArgumentReference();
-    Call callArityCheck3 = call();
-    move(regT1, callFrameRegister);
-    emitGetJITStubArg(1, regT0); // callee
-    emitGetJITStubArg(5, regT1); // argCount
-    restoreReturnAddressBeforeReturn(regT3); // return address
-
-    arityCheckOkay3.link(this);
-    isNativeFunc3.link(this);
-    compileOpCallInitializeCallFrame();
-    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_body)), regT0);
-    loadPtr(Address(regT0, OBJECT_OFFSETOF(FunctionBodyNode, m_jitCode)), regT0);
-    jump(regT0);
-
-#if PLATFORM(X86)
-    Label nativeCallThunk = align();
-    preserveReturnAddressAfterCall(regT0);
-    emitPutToCallFrameHeader(regT0, RegisterFile::ReturnPC); // Push return address
-
-    // Load caller frame's scope chain into this callframe so that whatever we call can
-    // get to its global data.
-    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT1);
-    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT1);
-    emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
-
-    emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
-
-    /* We have two structs that we use to describe the stackframe we set up for our
-     * call to native code. NativeCallFrameStructure describes how we set up the stack
-     * in advance of the call. NativeFunctionCalleeSignature describes the callframe
-     * as the native code expects it. We do this as we are using the fastcall calling
-     * convention, which results in the callee popping its arguments off the stack but
-     * not the rest of the callframe, so we need a nice way to ensure we increment the
-     * stack pointer by the right amount after the call.
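-     * (Illustration with assumed sizes, not taken from this file: if
-     * NativeCallFrameStructure rounds up to 48 bytes and
-     * NativeFunctionCalleeSignature occupies 12, the fastcall callee's ret
-     * pops only those 12 bytes, and the addPtr after the call restores the
-     * remaining 48 - 12 = 36 bytes of stack.)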
- */ - -#if COMPILER(MSVC) || PLATFORM(LINUX) -#if COMPILER(MSVC) -#pragma pack(push) -#pragma pack(4) -#endif // COMPILER(MSVC) - struct NativeCallFrameStructure { - // CallFrame* callFrame; // passed in EDX - JSObject* callee; - JSValue thisValue; - ArgList* argPointer; - ArgList args; - JSValue result; - }; - struct NativeFunctionCalleeSignature { - JSObject* callee; - JSValue thisValue; - ArgList* argPointer; - }; -#if COMPILER(MSVC) -#pragma pack(pop) -#endif // COMPILER(MSVC) -#else - struct NativeCallFrameStructure { - // CallFrame* callFrame; // passed in ECX - // JSObject* callee; // passed in EDX - JSValue thisValue; - ArgList* argPointer; - ArgList args; - }; - struct NativeFunctionCalleeSignature { - JSValue thisValue; - ArgList* argPointer; - }; -#endif - - const int NativeCallFrameSize = (sizeof(NativeCallFrameStructure) + 15) & ~15; - // Allocate system stack frame - subPtr(Imm32(NativeCallFrameSize), stackPointerRegister); - - // Set up arguments - subPtr(Imm32(1), regT0); // Don't include 'this' in argcount - - // push argcount - storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_argCount))); - - // Calculate the start of the callframe header, and store in regT1 - addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int)sizeof(Register)), callFrameRegister, regT1); - - // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT0) - mul32(Imm32(sizeof(Register)), regT0, regT0); - subPtr(regT0, regT1); - storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_args))); - - // ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register) - addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, args)), stackPointerRegister, regT0); - storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, argPointer))); - - // regT1 currently points to the first argument, regT1 - sizeof(Register) points to 'this' - loadPtr(Address(regT1, -(int)sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2); - loadPtr(Address(regT1, -(int)sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT3); - storePtr(regT2, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue) + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); - storePtr(regT3, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue) + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); - -#if COMPILER(MSVC) || PLATFORM(LINUX) - // ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register) - addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, result)), stackPointerRegister, X86::ecx); - - // Plant callee - emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::eax); - storePtr(X86::eax, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, callee))); - - // Plant callframe - move(callFrameRegister, X86::edx); - - call(Address(X86::eax, OBJECT_OFFSETOF(JSFunction, m_data))); - - // JSValue is a non-POD type, so eax points to it - emitLoad(0, regT1, regT0, X86::eax); -#else - emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::edx); // callee - move(callFrameRegister, X86::ecx); // callFrame - call(Address(X86::edx, OBJECT_OFFSETOF(JSFunction, m_data))); -#endif - - // We've put a few temporaries on the stack in addition to the actual arguments - // so pull them off now - addPtr(Imm32(NativeCallFrameSize - 
sizeof(NativeFunctionCalleeSignature)), stackPointerRegister); - - // Check for an exception - // FIXME: Maybe we can optimize this comparison to JSValue(). - move(ImmPtr(&globalData->exception), regT2); - Jump sawException1 = branch32(NotEqual, tagFor(0, regT2), Imm32(JSValue::CellTag)); - Jump sawException2 = branch32(NonZero, payloadFor(0, regT2), Imm32(0)); - - // Grab the return address. - emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT3); - - // Restore our caller's "r". - emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister); - - // Return. - restoreReturnAddressBeforeReturn(regT3); - ret(); - - // Handle an exception - sawException1.link(this); - sawException2.link(this); - // Grab the return address. - emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1); - move(ImmPtr(&globalData->exceptionLocation), regT2); - storePtr(regT1, regT2); - move(ImmPtr(reinterpret_cast(ctiVMThrowTrampoline)), regT2); - emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister); - poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*)); - restoreReturnAddressBeforeReturn(regT2); - ret(); - -#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL) -#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform." -#else - breakpoint(); -#endif - -#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) - Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1); - Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2); - Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3); -#endif - - // All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object. - LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size())); - -#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) - patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail)); - patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail)); - patchBuffer.link(string_failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail)); -#endif -#if ENABLE(JIT_OPTIMIZE_CALL) - patchBuffer.link(callArityCheck1, FunctionPtr(cti_op_call_arityCheck)); - patchBuffer.link(callJSFunction1, FunctionPtr(cti_op_call_JSFunction)); - patchBuffer.link(callArityCheck2, FunctionPtr(cti_op_call_arityCheck)); - patchBuffer.link(callJSFunction2, FunctionPtr(cti_op_call_JSFunction)); - patchBuffer.link(callDontLazyLinkCall, FunctionPtr(cti_vm_dontLazyLinkCall)); - patchBuffer.link(callLazyLinkCall, FunctionPtr(cti_vm_lazyLinkCall)); -#endif - patchBuffer.link(callArityCheck3, FunctionPtr(cti_op_call_arityCheck)); - patchBuffer.link(callJSFunction3, FunctionPtr(cti_op_call_JSFunction)); - - CodeRef finalCode = patchBuffer.finalizeCode(); - *executablePool = finalCode.m_executablePool; - - *ctiVirtualCall = trampolineAt(finalCode, virtualCallBegin); - *ctiNativeCallThunk = trampolineAt(finalCode, nativeCallThunk); -#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) - *ctiStringLengthTrampoline = trampolineAt(finalCode, stringLengthBegin); -#else - UNUSED_PARAM(ctiStringLengthTrampoline); -#endif -#if ENABLE(JIT_OPTIMIZE_CALL) - *ctiVirtualCallPreLink = trampolineAt(finalCode, virtualCallPreLinkBegin); - *ctiVirtualCallLink = trampolineAt(finalCode, virtualCallLinkBegin); -#else - UNUSED_PARAM(ctiVirtualCallPreLink); - UNUSED_PARAM(ctiVirtualCallLink); -#endif + return vm->getCTIStub(nativeCallGenerator); } void JIT::emit_op_mov(Instruction* 
currentInstruction)
 {
-    unsigned dst = currentInstruction[1].u.operand;
-    unsigned src = currentInstruction[2].u.operand;
-
-    if (m_codeBlock->isConstantRegisterIndex(src))
-        emitStore(dst, getConstantOperand(src));
-    else {
-        emitLoad(src, regT1, regT0);
-        emitStore(dst, regT1, regT0);
-        map(m_bytecodeIndex + OPCODE_LENGTH(op_mov), dst, regT1, regT0);
+    int dst = currentInstruction[1].u.operand;
+    int src = currentInstruction[2].u.operand;
+
+    if (canBeOptimizedOrInlined()) {
+        // Use a simpler approach, since the DFG thinks that the last result register
+        // is always set to the destination on every operation.
+        emitGetVirtualRegister(src, regT0);
+        emitPutVirtualRegister(dst);
+    } else {
+        if (m_codeBlock->isConstantRegisterIndex(src)) {
+            if (!getConstantOperand(src).isNumber())
+                store64(TrustedImm64(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
+            else
+                store64(Imm64(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
+            if (dst == m_lastResultBytecodeRegister)
+                killLastResultRegister();
+        } else if ((src == m_lastResultBytecodeRegister) || (dst == m_lastResultBytecodeRegister)) {
+            // If either the src or dst is the cached register go through
+            // get/put registers to make sure we track this correctly.
+            emitGetVirtualRegister(src, regT0);
+            emitPutVirtualRegister(dst);
+        } else {
+            // Perform the copy via regT1; do not disturb any mapping in regT0.
+            load64(Address(callFrameRegister, src * sizeof(Register)), regT1);
+            store64(regT1, Address(callFrameRegister, dst * sizeof(Register)));
+        }
     }
 }
 
 void JIT::emit_op_end(Instruction* currentInstruction)
 {
-    if (m_codeBlock->needsFullScopeChain())
-        JITStubCall(this, cti_op_end).call();
-    ASSERT(returnValueRegister != callFrameRegister);
-    emitLoad(currentInstruction[1].u.operand, regT1, regT0);
-    restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
+    RELEASE_ASSERT(returnValueRegister != callFrameRegister);
+    emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
+    restoreReturnAddressBeforeReturn(Address(callFrameRegister, JSStack::ReturnPC * static_cast<int>(sizeof(Register))));
     ret();
 }
 
 void JIT::emit_op_jmp(Instruction* currentInstruction)
 {
     unsigned target = currentInstruction[1].u.operand;
-    addJump(jump(), target + 1);
-}
-
-void JIT::emit_op_loop(Instruction* currentInstruction)
-{
-    unsigned target = currentInstruction[1].u.operand;
-    emitTimeoutCheck();
-    addJump(jump(), target + 1);
-}
-
-void JIT::emit_op_loop_if_less(Instruction* currentInstruction)
-{
-    unsigned op1 = currentInstruction[1].u.operand;
-    unsigned op2 = currentInstruction[2].u.operand;
-    unsigned target = currentInstruction[3].u.operand;
-
-    emitTimeoutCheck();
-
-    if (isOperandConstantImmediateInt(op1)) {
-        emitLoad(op2, regT1, regT0);
-        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
-        addJump(branch32(GreaterThan, regT0, Imm32(getConstantOperand(op1).asInt32())), target + 3);
-        return;
-    }
-
-    if (isOperandConstantImmediateInt(op2)) {
-        emitLoad(op1, regT1, regT0);
-        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
-        addJump(branch32(LessThan, regT0, Imm32(getConstantOperand(op2).asInt32())), target + 3);
-        return;
-    }
-
-    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
-    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
-    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
-    addJump(branch32(LessThan, regT0, regT2), 
target + 3); + addJump(jump(), target); } -void JIT::emitSlow_op_loop_if_less(Instruction* currentInstruction, Vector::iterator& iter) +void JIT::emit_op_new_object(Instruction* currentInstruction) { - unsigned op1 = currentInstruction[1].u.operand; - unsigned op2 = currentInstruction[2].u.operand; - unsigned target = currentInstruction[3].u.operand; + Structure* structure = currentInstruction[3].u.objectAllocationProfile->structure(); + size_t allocationSize = JSObject::allocationSize(structure->inlineCapacity()); + MarkedAllocator* allocator = &m_vm->heap.allocatorForObjectWithoutDestructor(allocationSize); - if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2)) - linkSlowCase(iter); // int32 check - linkSlowCase(iter); // int32 check + RegisterID resultReg = regT0; + RegisterID allocatorReg = regT1; + RegisterID scratchReg = regT2; - JITStubCall stubCall(this, cti_op_loop_if_less); - stubCall.addArgument(op1); - stubCall.addArgument(op2); - stubCall.call(); - emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3); + move(TrustedImmPtr(allocator), allocatorReg); + emitAllocateJSObject(allocatorReg, TrustedImmPtr(structure), resultReg, scratchReg); + emitPutVirtualRegister(currentInstruction[1].u.operand); } -void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction) +void JIT::emitSlow_op_new_object(Instruction* currentInstruction, Vector::iterator& iter) { - unsigned op1 = currentInstruction[1].u.operand; - unsigned op2 = currentInstruction[2].u.operand; - unsigned target = currentInstruction[3].u.operand; - - emitTimeoutCheck(); - - if (isOperandConstantImmediateInt(op1)) { - emitLoad(op2, regT1, regT0); - addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag))); - addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op1).asInt32())), target + 3); - return; - } - - if (isOperandConstantImmediateInt(op2)) { - emitLoad(op1, regT1, regT0); - addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag))); - addJump(branch32(LessThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target + 3); - return; - } - - emitLoad2(op1, regT1, regT0, op2, regT3, regT2); - addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag))); - addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag))); - addJump(branch32(LessThanOrEqual, regT0, regT2), target + 3); + linkSlowCase(iter); + JITStubCall stubCall(this, cti_op_new_object); + stubCall.addArgument(TrustedImmPtr(currentInstruction[3].u.objectAllocationProfile->structure())); + stubCall.call(currentInstruction[1].u.operand); } -void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector::iterator& iter) +void JIT::emit_op_check_has_instance(Instruction* currentInstruction) { - unsigned op1 = currentInstruction[1].u.operand; - unsigned op2 = currentInstruction[2].u.operand; - unsigned target = currentInstruction[3].u.operand; + unsigned baseVal = currentInstruction[3].u.operand; - if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2)) - linkSlowCase(iter); // int32 check - linkSlowCase(iter); // int32 check + emitGetVirtualRegister(baseVal, regT0); - JITStubCall stubCall(this, cti_op_loop_if_lesseq); - stubCall.addArgument(op1); - stubCall.addArgument(op2); - stubCall.call(); - emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3); -} + // Check that baseVal is a cell. 
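+    // (Aside: under the JSVALUE64 encoding a cell is an unboxed pointer - a
+    // value with none of the tag bits set - so this check compiles down to a
+    // single test of the value against tagMaskRegister.)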
+    emitJumpSlowCaseIfNotJSCell(regT0, baseVal);
 
-void JIT::emit_op_new_object(Instruction* currentInstruction)
-{
-    JITStubCall(this, cti_op_new_object).call(currentInstruction[1].u.operand);
+    // Check that baseVal 'ImplementsHasInstance'.
+    loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
+    addSlowCase(branchTest8(Zero, Address(regT0, Structure::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance)));
 }
 
 void JIT::emit_op_instanceof(Instruction* currentInstruction)
 {
     unsigned dst = currentInstruction[1].u.operand;
     unsigned value = currentInstruction[2].u.operand;
-    unsigned baseVal = currentInstruction[3].u.operand;
-    unsigned proto = currentInstruction[4].u.operand;
+    unsigned proto = currentInstruction[3].u.operand;
 
     // Load the operands (baseVal, proto, and value respectively) into registers.
     // We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
-    emitLoadPayload(proto, regT1);
-    emitLoadPayload(baseVal, regT0);
-    emitLoadPayload(value, regT2);
-
-    // Check that baseVal & proto are cells.
-    emitJumpSlowCaseIfNotJSCell(proto);
-    emitJumpSlowCaseIfNotJSCell(baseVal);
-
-    // Check that baseVal is an object, that it 'ImplementsHasInstance' but that it does not 'OverridesHasInstance'.
-    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
-    addSlowCase(branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType))); // FIXME: Maybe remove this test.
-    addSlowCase(branchTest32(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsHasInstance))); // FIXME: TOT checks ImplementsDefaultHasInstance.
+    emitGetVirtualRegister(value, regT2);
+    emitGetVirtualRegister(proto, regT1);
 
-    // If value is not an Object, return false.
-    emitLoadTag(value, regT0);
-    Jump valueIsImmediate = branch32(NotEqual, regT0, Imm32(JSValue::CellTag));
-    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
-    Jump valueIsNotObject = branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)); // FIXME: Maybe remove this test.
-
-    // Check proto is object.
-    loadPtr(Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
-    addSlowCase(branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
+    // Check that value and proto are cells; baseVal must be a cell - this is checked by op_check_has_instance.
+    emitJumpSlowCaseIfNotJSCell(regT2, value);
+    emitJumpSlowCaseIfNotJSCell(regT1, proto);
 
+    // Check that prototype is an object.
+    loadPtr(Address(regT1, JSCell::structureOffset()), regT3);
+    addSlowCase(emitJumpIfNotObject(regT3));
+
     // Optimistically load the result true, and start looping.
     // Initially, regT1 still contains proto and regT2 still contains value.
     // As we loop regT2 will be updated with its prototype, recursively walking the prototype chain.
-    move(Imm32(JSValue::TrueTag), regT0);
+    move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0);
 
     Label loop(this);
 
     // Load the prototype of the object in regT2. If this is equal to regT1 - WIN!
     // Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
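     // (Aside - the loop below is roughly equivalent to the following C++, where
     // structureOf() stands in for the load at JSCell::structureOffset() and
     // storedPrototype() for the load at Structure::prototypeOffset():
     //     for (JSValue v = structureOf(value)->storedPrototype(); v.isCell();
     //          v = structureOf(v.asCell())->storedPrototype()) {
     //         if (v == proto) return true;
     //     }
     //     return false; // reached null at the end of the prototype chain
     // )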
-    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
-    load32(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
+    loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
+    load64(Address(regT2, Structure::prototypeOffset()), regT2);
     Jump isInstance = branchPtr(Equal, regT2, regT1);
-    branch32(NotEqual, regT2, Imm32(0), loop);
+    emitJumpIfJSCell(regT2).linkTo(loop, this);
 
     // We get here either by dropping out of the loop, or if value was not an Object. Result is false.
-    valueIsImmediate.link(this);
-    valueIsNotObject.link(this);
-    move(Imm32(JSValue::FalseTag), regT0);
+    move(TrustedImm64(JSValue::encode(jsBoolean(false))), regT0);
 
     // isInstance jumps right down to here, to skip setting the result to false (it has already set true).
     isInstance.link(this);
-    emitStoreBool(dst, regT0);
+    emitPutVirtualRegister(dst);
 }
 
-void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_op_is_undefined(Instruction* currentInstruction)
 {
     unsigned dst = currentInstruction[1].u.operand;
     unsigned value = currentInstruction[2].u.operand;
-    unsigned baseVal = currentInstruction[3].u.operand;
-    unsigned proto = currentInstruction[4].u.operand;
-
-    linkSlowCaseIfNotJSCell(iter, baseVal);
-    linkSlowCaseIfNotJSCell(iter, proto);
-    linkSlowCase(iter);
-    linkSlowCase(iter);
-    linkSlowCase(iter);
+
+    emitGetVirtualRegister(value, regT0);
+    Jump isCell = emitJumpIfJSCell(regT0);
 
-    JITStubCall stubCall(this, cti_op_instanceof);
-    stubCall.addArgument(value);
-    stubCall.addArgument(baseVal);
-    stubCall.addArgument(proto);
-    stubCall.call(dst);
+    compare64(Equal, regT0, TrustedImm32(ValueUndefined), regT0);
+    Jump done = jump();
+
+    isCell.link(this);
+    loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
+    Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT1, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+    move(TrustedImm32(0), regT0);
+    Jump notMasqueradesAsUndefined = jump();
+
+    isMasqueradesAsUndefined.link(this);
+    move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
+    loadPtr(Address(regT1, Structure::globalObjectOffset()), regT1);
+    comparePtr(Equal, regT0, regT1, regT0);
+
+    notMasqueradesAsUndefined.link(this);
+    done.link(this);
+    emitTagAsBoolImmediate(regT0);
+    emitPutVirtualRegister(dst);
 }
 
-void JIT::emit_op_new_func(Instruction* currentInstruction)
+void JIT::emit_op_is_boolean(Instruction* currentInstruction)
 {
-    JITStubCall stubCall(this, cti_op_new_func);
-    stubCall.addArgument(ImmPtr(m_codeBlock->function(currentInstruction[2].u.operand)));
-    stubCall.call(currentInstruction[1].u.operand);
+    unsigned dst = currentInstruction[1].u.operand;
+    unsigned value = currentInstruction[2].u.operand;
+
+    emitGetVirtualRegister(value, regT0);
+    xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
+    test64(Zero, regT0, TrustedImm32(static_cast<int32_t>(~1)), regT0);
+    emitTagAsBoolImmediate(regT0);
+    emitPutVirtualRegister(dst);
 }
 
-void JIT::emit_op_get_global_var(Instruction* currentInstruction)
+void JIT::emit_op_is_number(Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    JSGlobalObject* globalObject = static_cast<JSGlobalObject*>(currentInstruction[2].u.jsCell);
-    ASSERT(globalObject->isGlobalObject());
-    int index = currentInstruction[3].u.operand;
-
-    loadPtr(&globalObject->d()->registers, regT2);
-
-    emitLoad(index, regT1, regT0, regT2);
-    emitStore(dst, regT1, regT0);
-    map(m_bytecodeIndex + 
OPCODE_LENGTH(op_get_global_var), dst, regT1, regT0); + unsigned dst = currentInstruction[1].u.operand; + unsigned value = currentInstruction[2].u.operand; + + emitGetVirtualRegister(value, regT0); + test64(NonZero, regT0, tagTypeNumberRegister, regT0); + emitTagAsBoolImmediate(regT0); + emitPutVirtualRegister(dst); } -void JIT::emit_op_put_global_var(Instruction* currentInstruction) +void JIT::emit_op_is_string(Instruction* currentInstruction) { - JSGlobalObject* globalObject = static_cast(currentInstruction[1].u.jsCell); - ASSERT(globalObject->isGlobalObject()); - int index = currentInstruction[2].u.operand; - int value = currentInstruction[3].u.operand; - - emitLoad(value, regT1, regT0); - - loadPtr(&globalObject->d()->registers, regT2); - emitStore(index, regT1, regT0, regT2); - map(m_bytecodeIndex + OPCODE_LENGTH(op_put_global_var), value, regT1, regT0); + unsigned dst = currentInstruction[1].u.operand; + unsigned value = currentInstruction[2].u.operand; + + emitGetVirtualRegister(value, regT0); + Jump isNotCell = emitJumpIfNotJSCell(regT0); + + loadPtr(Address(regT0, JSCell::structureOffset()), regT1); + compare8(Equal, Address(regT1, Structure::typeInfoTypeOffset()), TrustedImm32(StringType), regT0); + emitTagAsBoolImmediate(regT0); + Jump done = jump(); + + isNotCell.link(this); + move(TrustedImm32(ValueFalse), regT0); + + done.link(this); + emitPutVirtualRegister(dst); } -void JIT::emit_op_get_scoped_var(Instruction* currentInstruction) +void JIT::emit_op_call(Instruction* currentInstruction) { - int dst = currentInstruction[1].u.operand; - int index = currentInstruction[2].u.operand; - int skip = currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain(); - - emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2); - while (skip--) - loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2); - - loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2); - loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject, d)), regT2); - loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), regT2); - - emitLoad(index, regT1, regT0, regT2); - emitStore(dst, regT1, regT0); - map(m_bytecodeIndex + OPCODE_LENGTH(op_get_scoped_var), dst, regT1, regT0); + compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++); } -void JIT::emit_op_put_scoped_var(Instruction* currentInstruction) +void JIT::emit_op_call_eval(Instruction* currentInstruction) { - int index = currentInstruction[1].u.operand; - int skip = currentInstruction[2].u.operand + m_codeBlock->needsFullScopeChain(); - int value = currentInstruction[3].u.operand; - - emitLoad(value, regT1, regT0); - - emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2); - while (skip--) - loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2); + compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex); +} - loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2); - loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject, d)), regT2); - loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), regT2); +void JIT::emit_op_call_varargs(Instruction* currentInstruction) +{ + compileOpCall(op_call_varargs, currentInstruction, m_callLinkInfoIndex++); +} - emitStore(index, regT1, regT0, regT2); - map(m_bytecodeIndex + OPCODE_LENGTH(op_put_scoped_var), value, regT1, regT0); +void JIT::emit_op_construct(Instruction* currentInstruction) +{ + compileOpCall(op_construct, currentInstruction, 
m_callLinkInfoIndex++); } void JIT::emit_op_tear_off_activation(Instruction* currentInstruction) { + int activation = currentInstruction[1].u.operand; + Jump activationNotCreated = branchTest64(Zero, addressFor(activation)); JITStubCall stubCall(this, cti_op_tear_off_activation); - stubCall.addArgument(currentInstruction[1].u.operand); + stubCall.addArgument(activation, regT2); stubCall.call(); + activationNotCreated.link(this); } -void JIT::emit_op_tear_off_arguments(Instruction*) +void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction) { - JITStubCall(this, cti_op_tear_off_arguments).call(); + int arguments = currentInstruction[1].u.operand; + int activation = currentInstruction[2].u.operand; + + Jump argsNotCreated = branchTest64(Zero, Address(callFrameRegister, sizeof(Register) * (unmodifiedArgumentsRegister(arguments)))); + JITStubCall stubCall(this, cti_op_tear_off_arguments); + stubCall.addArgument(unmodifiedArgumentsRegister(arguments), regT2); + stubCall.addArgument(activation, regT2); + stubCall.call(); + argsNotCreated.link(this); } -void JIT::emit_op_new_array(Instruction* currentInstruction) +void JIT::emit_op_ret(Instruction* currentInstruction) { - JITStubCall stubCall(this, cti_op_new_array); - stubCall.addArgument(Imm32(currentInstruction[2].u.operand)); - stubCall.addArgument(Imm32(currentInstruction[3].u.operand)); - stubCall.call(currentInstruction[1].u.operand); + ASSERT(callFrameRegister != regT1); + ASSERT(regT1 != returnValueRegister); + ASSERT(returnValueRegister != callFrameRegister); + + // Return the result in %eax. + emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister); + + // Grab the return address. + emitGetFromCallFrameHeaderPtr(JSStack::ReturnPC, regT1); + + // Restore our caller's "r". + emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister); + + // Return. + restoreReturnAddressBeforeReturn(regT1); + ret(); } -void JIT::emit_op_resolve(Instruction* currentInstruction) +void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction) { - JITStubCall stubCall(this, cti_op_resolve); - stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand))); - stubCall.call(currentInstruction[1].u.operand); + ASSERT(callFrameRegister != regT1); + ASSERT(regT1 != returnValueRegister); + ASSERT(returnValueRegister != callFrameRegister); + + // Return the result in %eax. + emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister); + Jump notJSCell = emitJumpIfNotJSCell(returnValueRegister); + loadPtr(Address(returnValueRegister, JSCell::structureOffset()), regT2); + Jump notObject = emitJumpIfNotObject(regT2); + + // Grab the return address. + emitGetFromCallFrameHeaderPtr(JSStack::ReturnPC, regT1); + + // Restore our caller's "r". + emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister); + + // Return. + restoreReturnAddressBeforeReturn(regT1); + ret(); + + // Return 'this' in %eax. + notJSCell.link(this); + notObject.link(this); + emitGetVirtualRegister(currentInstruction[2].u.operand, returnValueRegister); + + // Grab the return address. + emitGetFromCallFrameHeaderPtr(JSStack::ReturnPC, regT1); + + // Restore our caller's "r". + emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister); + + // Return. 
+    restoreReturnAddressBeforeReturn(regT1);
+    ret();
 }
 
 void JIT::emit_op_to_primitive(Instruction* currentInstruction)
@@ -697,1587 +347,53 @@ void JIT::emit_op_to_primitive(Instruction* currentInstruction)
     int dst = currentInstruction[1].u.operand;
     int src = currentInstruction[2].u.operand;
 
-    emitLoad(src, regT1, regT0);
-
-    Jump isImm = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
-    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
+    emitGetVirtualRegister(src, regT0);
+
+    Jump isImm = emitJumpIfNotJSCell(regT0);
+    addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
     isImm.link(this);
 
     if (dst != src)
-        emitStore(dst, regT1, regT0);
-    map(m_bytecodeIndex + OPCODE_LENGTH(op_to_primitive), dst, regT1, regT0);
-}
-
-void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
-    int dst = currentInstruction[1].u.operand;
-
-    linkSlowCase(iter);
+        emitPutVirtualRegister(dst);
 
-    JITStubCall stubCall(this, cti_op_to_primitive);
-    stubCall.addArgument(regT1, regT0);
-    stubCall.call(dst);
 }
 
 void JIT::emit_op_strcat(Instruction* currentInstruction)
 {
     JITStubCall stubCall(this, cti_op_strcat);
-    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
-    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+    stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
+    stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
     stubCall.call(currentInstruction[1].u.operand);
 }
 
-void JIT::emit_op_loop_if_true(Instruction* currentInstruction)
+void JIT::emit_op_not(Instruction* currentInstruction)
 {
-    unsigned cond = currentInstruction[1].u.operand;
-    unsigned target = currentInstruction[2].u.operand;
-
-    emitTimeoutCheck();
-
-    emitLoad(cond, regT1, regT0);
+    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
 
-    Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
-    addJump(branch32(NotEqual, regT0, Imm32(0)), target + 2);
-    Jump isNotZero = jump();
+    // Invert against JSValue(false); if the value was tagged as a boolean, then all bits will be
+    // clear other than the low bit (which will be 0 or 1 for false or true inputs respectively).
+    // Then invert against JSValue(true), which will add the tag back in, and flip the low bit.
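+    // (Concretely - with the usual JSVALUE64 constants, ValueFalse is 0x06 and
+    // ValueTrue is 0x07: false ^ ValueFalse == 0x00 and true ^ ValueFalse == 0x01,
+    // so any set bit other than bit 0 means the input was not a boolean and the
+    // branch below takes the slow case; xoring the surviving 0/1 with ValueTrue
+    // then yields 0x07/0x06, i.e. the inverted, re-tagged boolean.)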
+    xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
+    addSlowCase(branchTestPtr(NonZero, regT0, TrustedImm32(static_cast<int32_t>(~1))));
+    xor64(TrustedImm32(static_cast<int32_t>(ValueTrue)), regT0);
 
-    isNotInteger.link(this);
-
-    addJump(branch32(Equal, regT1, Imm32(JSValue::TrueTag)), target + 2);
-    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::FalseTag)));
-
-    isNotZero.link(this);
-}
-
-void JIT::emitSlow_op_loop_if_true(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
-    unsigned cond = currentInstruction[1].u.operand;
-    unsigned target = currentInstruction[2].u.operand;
-
-    linkSlowCase(iter);
-
-    JITStubCall stubCall(this, cti_op_jtrue);
-    stubCall.addArgument(cond);
-    stubCall.call();
-    emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 2);
-}
-
-void JIT::emit_op_resolve_base(Instruction* currentInstruction)
-{
-    JITStubCall stubCall(this, cti_op_resolve_base);
-    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
-    stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
-{
-    JITStubCall stubCall(this, cti_op_resolve_skip);
-    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
-    stubCall.addArgument(Imm32(currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain()));
-    stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_resolve_global(Instruction* currentInstruction)
-{
-    // FIXME: Optimize to use patching instead of so many memory accesses.
-
-    unsigned dst = currentInstruction[1].u.operand;
-    void* globalObject = currentInstruction[2].u.jsCell;
-
-    unsigned currentIndex = m_globalResolveInfoIndex++;
-    void* structureAddress = &(m_codeBlock->globalResolveInfo(currentIndex).structure);
-    void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset);
-
-    // Verify structure.
-    move(ImmPtr(globalObject), regT0);
-    loadPtr(structureAddress, regT1);
-    addSlowCase(branchPtr(NotEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure))));
-
-    // Load property.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSGlobalObject, m_externalStorage)), regT2); - load32(offsetAddr, regT3); - load32(BaseIndex(regT2, regT3, TimesEight), regT0); // payload - load32(BaseIndex(regT2, regT3, TimesEight, 4), regT1); // tag - emitStore(dst, regT1, regT0); - map(m_bytecodeIndex + OPCODE_LENGTH(op_resolve_global), dst, regT1, regT0); -} - -void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector::iterator& iter) -{ - unsigned dst = currentInstruction[1].u.operand; - void* globalObject = currentInstruction[2].u.jsCell; - Identifier* ident = &m_codeBlock->identifier(currentInstruction[3].u.operand); - - unsigned currentIndex = m_globalResolveInfoIndex++; - - linkSlowCase(iter); - JITStubCall stubCall(this, cti_op_resolve_global); - stubCall.addArgument(ImmPtr(globalObject)); - stubCall.addArgument(ImmPtr(ident)); - stubCall.addArgument(Imm32(currentIndex)); - stubCall.call(dst); -} - -void JIT::emit_op_not(Instruction* currentInstruction) -{ - unsigned dst = currentInstruction[1].u.operand; - unsigned src = currentInstruction[2].u.operand; - - emitLoadTag(src, regT0); - - xor32(Imm32(JSValue::FalseTag), regT0); - addSlowCase(branchTest32(NonZero, regT0, Imm32(~1))); - xor32(Imm32(JSValue::TrueTag), regT0); - - emitStoreBool(dst, regT0, (dst == src)); -} - -void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector::iterator& iter) -{ - unsigned dst = currentInstruction[1].u.operand; - unsigned src = currentInstruction[2].u.operand; - - linkSlowCase(iter); - - JITStubCall stubCall(this, cti_op_not); - stubCall.addArgument(src); - stubCall.call(dst); -} - -void JIT::emit_op_jfalse(Instruction* currentInstruction) -{ - unsigned cond = currentInstruction[1].u.operand; - unsigned target = currentInstruction[2].u.operand; - - emitLoad(cond, regT1, regT0); - - Jump isTrue = branch32(Equal, regT1, Imm32(JSValue::TrueTag)); - addJump(branch32(Equal, regT1, Imm32(JSValue::FalseTag)), target + 2); - - Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)); - Jump isTrue2 = branch32(NotEqual, regT0, Imm32(0)); - addJump(jump(), target + 2); - - if (supportsFloatingPoint()) { - isNotInteger.link(this); - - addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag))); - - zeroDouble(fpRegT0); - emitLoadDouble(cond, fpRegT1); - addJump(branchDouble(DoubleEqual, fpRegT0, fpRegT1), target + 2); - } else - addSlowCase(isNotInteger); - - isTrue.link(this); - isTrue2.link(this); -} - -void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector::iterator& iter) -{ - unsigned cond = currentInstruction[1].u.operand; - unsigned target = currentInstruction[2].u.operand; - - linkSlowCase(iter); - JITStubCall stubCall(this, cti_op_jtrue); - stubCall.addArgument(cond); - stubCall.call(); - emitJumpSlowToHot(branchTest32(Zero, regT0), target + 2); // Inverted. 
-} - -void JIT::emit_op_jtrue(Instruction* currentInstruction) -{ - unsigned cond = currentInstruction[1].u.operand; - unsigned target = currentInstruction[2].u.operand; - - emitLoad(cond, regT1, regT0); - - Jump isFalse = branch32(Equal, regT1, Imm32(JSValue::FalseTag)); - addJump(branch32(Equal, regT1, Imm32(JSValue::TrueTag)), target + 2); - - Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)); - Jump isFalse2 = branch32(Equal, regT0, Imm32(0)); - addJump(jump(), target + 2); - - if (supportsFloatingPoint()) { - isNotInteger.link(this); - - addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag))); - - zeroDouble(fpRegT0); - emitLoadDouble(cond, fpRegT1); - addJump(branchDouble(DoubleNotEqual, fpRegT0, fpRegT1), target + 2); - } else - addSlowCase(isNotInteger); - - isFalse.link(this); - isFalse2.link(this); -} - -void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector::iterator& iter) -{ - unsigned cond = currentInstruction[1].u.operand; - unsigned target = currentInstruction[2].u.operand; - - linkSlowCase(iter); - JITStubCall stubCall(this, cti_op_jtrue); - stubCall.addArgument(cond); - stubCall.call(); - emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 2); -} - -void JIT::emit_op_jeq_null(Instruction* currentInstruction) -{ - unsigned src = currentInstruction[1].u.operand; - unsigned target = currentInstruction[2].u.operand; - - emitLoad(src, regT1, regT0); - - Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag)); - - // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure. - loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2); - addJump(branchTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2); - - Jump wasNotImmediate = jump(); - - // Now handle the immediate cases - undefined & null - isImmediate.link(this); - - set32(Equal, regT1, Imm32(JSValue::NullTag), regT2); - set32(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1); - or32(regT2, regT1); - - addJump(branchTest32(NonZero, regT1), target + 2); - - wasNotImmediate.link(this); -} - -void JIT::emit_op_jneq_null(Instruction* currentInstruction) -{ - unsigned src = currentInstruction[1].u.operand; - unsigned target = currentInstruction[2].u.operand; - - emitLoad(src, regT1, regT0); - - Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag)); - - // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure. 
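-    // (MasqueradesAsUndefined marks host objects, such as document.all, that
-    // must compare equal to null and undefined, which is why a structure flag
-    // test is needed here rather than a plain tag check.)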
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2); - addJump(branchTest32(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2); - - Jump wasNotImmediate = jump(); - - // Now handle the immediate cases - undefined & null - isImmediate.link(this); - - set32(Equal, regT1, Imm32(JSValue::NullTag), regT2); - set32(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1); - or32(regT2, regT1); - - addJump(branchTest32(Zero, regT1), target + 2); - - wasNotImmediate.link(this); -} - -void JIT::emit_op_jneq_ptr(Instruction* currentInstruction) -{ - unsigned src = currentInstruction[1].u.operand; - JSCell* ptr = currentInstruction[2].u.jsCell; - unsigned target = currentInstruction[3].u.operand; - - emitLoad(src, regT1, regT0); - addJump(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)), target + 3); - addJump(branchPtr(NotEqual, regT0, ImmPtr(ptr)), target + 3); -} - -void JIT::emit_op_jsr(Instruction* currentInstruction) -{ - int retAddrDst = currentInstruction[1].u.operand; - int target = currentInstruction[2].u.operand; - DataLabelPtr storeLocation = storePtrWithPatch(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst)); - addJump(jump(), target + 2); - m_jsrSites.append(JSRInfo(storeLocation, label())); -} - -void JIT::emit_op_sret(Instruction* currentInstruction) -{ - jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand)); -} - -void JIT::emit_op_eq(Instruction* currentInstruction) -{ - unsigned dst = currentInstruction[1].u.operand; - unsigned src1 = currentInstruction[2].u.operand; - unsigned src2 = currentInstruction[3].u.operand; - - emitLoad2(src1, regT1, regT0, src2, regT3, regT2); - addSlowCase(branch32(NotEqual, regT1, regT3)); - addSlowCase(branch32(Equal, regT1, Imm32(JSValue::CellTag))); - addSlowCase(branch32(Below, regT1, Imm32(JSValue::LowestTag))); - - set8(Equal, regT0, regT2, regT0); - or32(Imm32(JSValue::FalseTag), regT0); - - emitStoreBool(dst, regT0); -} - -void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector::iterator& iter) -{ - unsigned dst = currentInstruction[1].u.operand; - unsigned op1 = currentInstruction[2].u.operand; - unsigned op2 = currentInstruction[3].u.operand; - - JumpList storeResult; - JumpList genericCase; - - genericCase.append(getSlowCase(iter)); // tags not equal - - linkSlowCase(iter); // tags equal and JSCell - genericCase.append(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr))); - genericCase.append(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsStringVPtr))); - - // String case. - JITStubCall stubCallEqStrings(this, cti_op_eq_strings); - stubCallEqStrings.addArgument(regT0); - stubCallEqStrings.addArgument(regT2); - stubCallEqStrings.call(); - storeResult.append(jump()); - - // Generic case. 
- genericCase.append(getSlowCase(iter)); // doubles - genericCase.link(this); - JITStubCall stubCallEq(this, cti_op_eq); - stubCallEq.addArgument(op1); - stubCallEq.addArgument(op2); - stubCallEq.call(regT0); - - storeResult.link(this); - or32(Imm32(JSValue::FalseTag), regT0); - emitStoreBool(dst, regT0); -} - -void JIT::emit_op_neq(Instruction* currentInstruction) -{ - unsigned dst = currentInstruction[1].u.operand; - unsigned src1 = currentInstruction[2].u.operand; - unsigned src2 = currentInstruction[3].u.operand; - - emitLoad2(src1, regT1, regT0, src2, regT3, regT2); - addSlowCase(branch32(NotEqual, regT1, regT3)); - addSlowCase(branch32(Equal, regT1, Imm32(JSValue::CellTag))); - addSlowCase(branch32(Below, regT1, Imm32(JSValue::LowestTag))); - - set8(NotEqual, regT0, regT2, regT0); - or32(Imm32(JSValue::FalseTag), regT0); - - emitStoreBool(dst, regT0); -} - -void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector::iterator& iter) -{ - unsigned dst = currentInstruction[1].u.operand; - - JumpList storeResult; - JumpList genericCase; - - genericCase.append(getSlowCase(iter)); // tags not equal - - linkSlowCase(iter); // tags equal and JSCell - genericCase.append(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr))); - genericCase.append(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsStringVPtr))); - - // String case. - JITStubCall stubCallEqStrings(this, cti_op_eq_strings); - stubCallEqStrings.addArgument(regT0); - stubCallEqStrings.addArgument(regT2); - stubCallEqStrings.call(regT0); - storeResult.append(jump()); - - // Generic case. - genericCase.append(getSlowCase(iter)); // doubles - genericCase.link(this); - JITStubCall stubCallEq(this, cti_op_eq); - stubCallEq.addArgument(regT1, regT0); - stubCallEq.addArgument(regT3, regT2); - stubCallEq.call(regT0); - - storeResult.link(this); - xor32(Imm32(0x1), regT0); - or32(Imm32(JSValue::FalseTag), regT0); - emitStoreBool(dst, regT0); -} - -void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type) -{ - unsigned dst = currentInstruction[1].u.operand; - unsigned src1 = currentInstruction[2].u.operand; - unsigned src2 = currentInstruction[3].u.operand; - - emitLoadTag(src1, regT0); - emitLoadTag(src2, regT1); - - // Jump to a slow case if either operand is double, or if both operands are - // cells and/or Int32s. 
- move(regT0, regT2); - and32(regT1, regT2); - addSlowCase(branch32(Below, regT2, Imm32(JSValue::LowestTag))); - addSlowCase(branch32(AboveOrEqual, regT2, Imm32(JSValue::CellTag))); - - if (type == OpStrictEq) - set8(Equal, regT0, regT1, regT0); - else - set8(NotEqual, regT0, regT1, regT0); - - or32(Imm32(JSValue::FalseTag), regT0); - - emitStoreBool(dst, regT0); -} - -void JIT::emit_op_stricteq(Instruction* currentInstruction) -{ - compileOpStrictEq(currentInstruction, OpStrictEq); -} - -void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector::iterator& iter) -{ - unsigned dst = currentInstruction[1].u.operand; - unsigned src1 = currentInstruction[2].u.operand; - unsigned src2 = currentInstruction[3].u.operand; - - linkSlowCase(iter); - linkSlowCase(iter); - - JITStubCall stubCall(this, cti_op_stricteq); - stubCall.addArgument(src1); - stubCall.addArgument(src2); - stubCall.call(dst); -} - -void JIT::emit_op_nstricteq(Instruction* currentInstruction) -{ - compileOpStrictEq(currentInstruction, OpNStrictEq); -} - -void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector::iterator& iter) -{ - unsigned dst = currentInstruction[1].u.operand; - unsigned src1 = currentInstruction[2].u.operand; - unsigned src2 = currentInstruction[3].u.operand; - - linkSlowCase(iter); - linkSlowCase(iter); - - JITStubCall stubCall(this, cti_op_nstricteq); - stubCall.addArgument(src1); - stubCall.addArgument(src2); - stubCall.call(dst); -} - -void JIT::emit_op_eq_null(Instruction* currentInstruction) -{ - unsigned dst = currentInstruction[1].u.operand; - unsigned src = currentInstruction[2].u.operand; - - emitLoad(src, regT1, regT0); - Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag)); - - loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1); - setTest8(NonZero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT1); - - Jump wasNotImmediate = jump(); - - isImmediate.link(this); - - set8(Equal, regT1, Imm32(JSValue::NullTag), regT2); - set8(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1); - or32(regT2, regT1); - - wasNotImmediate.link(this); - - or32(Imm32(JSValue::FalseTag), regT1); - - emitStoreBool(dst, regT1); -} - -void JIT::emit_op_neq_null(Instruction* currentInstruction) -{ - unsigned dst = currentInstruction[1].u.operand; - unsigned src = currentInstruction[2].u.operand; - - emitLoad(src, regT1, regT0); - Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag)); - - loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1); - setTest8(Zero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT1); - - Jump wasNotImmediate = jump(); - - isImmediate.link(this); - - set8(NotEqual, regT1, Imm32(JSValue::NullTag), regT2); - set8(NotEqual, regT1, Imm32(JSValue::UndefinedTag), regT1); - and32(regT2, regT1); - - wasNotImmediate.link(this); - - or32(Imm32(JSValue::FalseTag), regT1); - - emitStoreBool(dst, regT1); -} - -void JIT::emit_op_resolve_with_base(Instruction* currentInstruction) -{ - JITStubCall stubCall(this, cti_op_resolve_with_base); - stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand))); - stubCall.addArgument(Imm32(currentInstruction[1].u.operand)); - stubCall.call(currentInstruction[2].u.operand); -} - -void JIT::emit_op_new_func_exp(Instruction* currentInstruction) -{ - JITStubCall stubCall(this, cti_op_new_func_exp); - 
stubCall.addArgument(ImmPtr(m_codeBlock->functionExpression(currentInstruction[2].u.operand))); - stubCall.call(currentInstruction[1].u.operand); -} - -void JIT::emit_op_new_regexp(Instruction* currentInstruction) -{ - JITStubCall stubCall(this, cti_op_new_regexp); - stubCall.addArgument(ImmPtr(m_codeBlock->regexp(currentInstruction[2].u.operand))); - stubCall.call(currentInstruction[1].u.operand); -} - -void JIT::emit_op_throw(Instruction* currentInstruction) -{ - unsigned exception = currentInstruction[1].u.operand; - JITStubCall stubCall(this, cti_op_throw); - stubCall.addArgument(exception); - stubCall.call(); - -#ifndef NDEBUG - // cti_op_throw always changes it's return address, - // this point in the code should never be reached. - breakpoint(); -#endif -} - -void JIT::emit_op_next_pname(Instruction* currentInstruction) -{ - int dst = currentInstruction[1].u.operand; - int iter = currentInstruction[2].u.operand; - int target = currentInstruction[3].u.operand; - - load32(Address(callFrameRegister, (iter * sizeof(Register))), regT0); - - JITStubCall stubCall(this, cti_op_next_pname); - stubCall.addArgument(regT0); - stubCall.call(); - - Jump endOfIter = branchTestPtr(Zero, regT0); - emitStore(dst, regT1, regT0); - map(m_bytecodeIndex + OPCODE_LENGTH(op_next_pname), dst, regT1, regT0); - addJump(jump(), target + 3); - endOfIter.link(this); -} - -void JIT::emit_op_push_scope(Instruction* currentInstruction) -{ - JITStubCall stubCall(this, cti_op_push_scope); - stubCall.addArgument(currentInstruction[1].u.operand); - stubCall.call(currentInstruction[1].u.operand); -} - -void JIT::emit_op_pop_scope(Instruction*) -{ - JITStubCall(this, cti_op_pop_scope).call(); -} - -void JIT::emit_op_to_jsnumber(Instruction* currentInstruction) -{ - int dst = currentInstruction[1].u.operand; - int src = currentInstruction[2].u.operand; - - emitLoad(src, regT1, regT0); - - Jump isInt32 = branch32(Equal, regT1, Imm32(JSValue::Int32Tag)); - addSlowCase(branch32(AboveOrEqual, regT1, Imm32(JSValue::DeletedValueTag))); - isInt32.link(this); - - if (src != dst) - emitStore(dst, regT1, regT0); - map(m_bytecodeIndex + OPCODE_LENGTH(op_to_jsnumber), dst, regT1, regT0); -} - -void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector::iterator& iter) -{ - int dst = currentInstruction[1].u.operand; - - linkSlowCase(iter); - - JITStubCall stubCall(this, cti_op_to_jsnumber); - stubCall.addArgument(regT1, regT0); - stubCall.call(dst); -} - -void JIT::emit_op_push_new_scope(Instruction* currentInstruction) -{ - JITStubCall stubCall(this, cti_op_push_new_scope); - stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand))); - stubCall.addArgument(currentInstruction[3].u.operand); - stubCall.call(currentInstruction[1].u.operand); -} - -void JIT::emit_op_catch(Instruction* currentInstruction) -{ - unsigned exception = currentInstruction[1].u.operand; - - // This opcode only executes after a return from cti_op_throw. - - // cti_op_throw may have taken us to a call frame further up the stack; reload - // the call frame pointer to adjust. - peek(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*)); - - // Now store the exception returned by cti_op_throw. 
- emitStore(exception, regT1, regT0); - map(m_bytecodeIndex + OPCODE_LENGTH(op_catch), exception, regT1, regT0); -} - -void JIT::emit_op_jmp_scopes(Instruction* currentInstruction) -{ - JITStubCall stubCall(this, cti_op_jmp_scopes); - stubCall.addArgument(Imm32(currentInstruction[1].u.operand)); - stubCall.call(); - addJump(jump(), currentInstruction[2].u.operand + 2); -} - -void JIT::emit_op_switch_imm(Instruction* currentInstruction) -{ - unsigned tableIndex = currentInstruction[1].u.operand; - unsigned defaultOffset = currentInstruction[2].u.operand; - unsigned scrutinee = currentInstruction[3].u.operand; - - // create jump table for switch destinations, track this switch statement. - SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex); - m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate)); - jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size()); - - JITStubCall stubCall(this, cti_op_switch_imm); - stubCall.addArgument(scrutinee); - stubCall.addArgument(Imm32(tableIndex)); - stubCall.call(); - jump(regT0); -} - -void JIT::emit_op_switch_char(Instruction* currentInstruction) -{ - unsigned tableIndex = currentInstruction[1].u.operand; - unsigned defaultOffset = currentInstruction[2].u.operand; - unsigned scrutinee = currentInstruction[3].u.operand; - - // create jump table for switch destinations, track this switch statement. - SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex); - m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Character)); - jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size()); - - JITStubCall stubCall(this, cti_op_switch_char); - stubCall.addArgument(scrutinee); - stubCall.addArgument(Imm32(tableIndex)); - stubCall.call(); - jump(regT0); -} - -void JIT::emit_op_switch_string(Instruction* currentInstruction) -{ - unsigned tableIndex = currentInstruction[1].u.operand; - unsigned defaultOffset = currentInstruction[2].u.operand; - unsigned scrutinee = currentInstruction[3].u.operand; - - // create jump table for switch destinations, track this switch statement. - StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex); - m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset)); - - JITStubCall stubCall(this, cti_op_switch_string); - stubCall.addArgument(scrutinee); - stubCall.addArgument(Imm32(tableIndex)); - stubCall.call(); - jump(regT0); -} - -void JIT::emit_op_new_error(Instruction* currentInstruction) -{ - unsigned dst = currentInstruction[1].u.operand; - unsigned type = currentInstruction[2].u.operand; - unsigned message = currentInstruction[3].u.operand; - - JITStubCall stubCall(this, cti_op_new_error); - stubCall.addArgument(Imm32(type)); - stubCall.addArgument(m_codeBlock->getConstant(message)); - stubCall.addArgument(Imm32(m_bytecodeIndex)); - stubCall.call(dst); -} - -void JIT::emit_op_debug(Instruction* currentInstruction) -{ - JITStubCall stubCall(this, cti_op_debug); - stubCall.addArgument(Imm32(currentInstruction[1].u.operand)); - stubCall.addArgument(Imm32(currentInstruction[2].u.operand)); - stubCall.addArgument(Imm32(currentInstruction[3].u.operand)); - stubCall.call(); -} - - -void JIT::emit_op_enter(Instruction*) -{ - // Even though JIT code doesn't use them, we initialize our constant - // registers to zap stale pointers, to avoid unnecessarily prolonging - // object lifetime and increasing GC pressure. 
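// Illustrative sketch of what the loop just below does: stamp jsUndefined()
// over every local slot so that a stale pointer left behind by an earlier
// frame can't keep a dead object alive. The encoding constant is a placeholder:

#include <cstdint>
#include <vector>

constexpr uint64_t UndefinedBits = 0x0A; // placeholder, not JSC's real encoding

void zapLocals(std::vector<uint64_t>& frame, size_t numVars)
{
    for (size_t i = 0; i < numVars; ++i)
        frame[i] = UndefinedBits; // the collector never sees the old pointer
}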
- for (int i = 0; i < m_codeBlock->m_numVars; ++i) - emitStore(i, jsUndefined()); -} - -void JIT::emit_op_enter_with_activation(Instruction* currentInstruction) -{ - emit_op_enter(currentInstruction); - - JITStubCall(this, cti_op_push_activation).call(currentInstruction[1].u.operand); -} - -void JIT::emit_op_create_arguments(Instruction*) -{ - Jump argsNotCell = branch32(NotEqual, tagFor(RegisterFile::ArgumentsRegister, callFrameRegister), Imm32(JSValue::CellTag)); - Jump argsNotNull = branchTestPtr(NonZero, payloadFor(RegisterFile::ArgumentsRegister, callFrameRegister)); - - // If we get here the arguments pointer is a null cell - i.e. arguments need lazy creation. - if (m_codeBlock->m_numParameters == 1) - JITStubCall(this, cti_op_create_arguments_no_params).call(); - else - JITStubCall(this, cti_op_create_arguments).call(); - - argsNotCell.link(this); - argsNotNull.link(this); -} - -void JIT::emit_op_init_arguments(Instruction*) -{ - emitStore(RegisterFile::ArgumentsRegister, JSValue(), callFrameRegister); -} - -void JIT::emit_op_convert_this(Instruction* currentInstruction) -{ - unsigned thisRegister = currentInstruction[1].u.operand; - - emitLoad(thisRegister, regT1, regT0); - - addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag))); - - loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2); - addSlowCase(branchTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion))); - - map(m_bytecodeIndex + OPCODE_LENGTH(op_convert_this), thisRegister, regT1, regT0); -} - -void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector::iterator& iter) -{ - unsigned thisRegister = currentInstruction[1].u.operand; - - linkSlowCase(iter); - linkSlowCase(iter); - - JITStubCall stubCall(this, cti_op_convert_this); - stubCall.addArgument(regT1, regT0); - stubCall.call(thisRegister); -} - -void JIT::emit_op_profile_will_call(Instruction* currentInstruction) -{ - peek(regT2, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof (void*)); - Jump noProfiler = branchTestPtr(Zero, Address(regT2)); - - JITStubCall stubCall(this, cti_op_profile_will_call); - stubCall.addArgument(currentInstruction[1].u.operand); - stubCall.call(); - noProfiler.link(this); -} - -void JIT::emit_op_profile_did_call(Instruction* currentInstruction) -{ - peek(regT2, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof (void*)); - Jump noProfiler = branchTestPtr(Zero, Address(regT2)); - - JITStubCall stubCall(this, cti_op_profile_did_call); - stubCall.addArgument(currentInstruction[1].u.operand); - stubCall.call(); - noProfiler.link(this); -} - -#else // USE(JSVALUE32_64) - -#define RECORD_JUMP_TARGET(targetOffset) \ - do { m_labels[m_bytecodeIndex + (targetOffset)].used(); } while (false) - -void JIT::privateCompileCTIMachineTrampolines(RefPtr* executablePool, JSGlobalData* globalData, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallPreLink, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk) -{ -#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) - // (2) The second function provides fast property access for string length - Label stringLengthBegin = align(); - - // Check eax is a string - Jump string_failureCases1 = emitJumpIfNotJSCell(regT0); - Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)); - - // Checks out okay! - get the length from the Ustring. 
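// Illustrative sketch of the trampoline's shape in C++ - two guards, then two
// dependent loads; RepLike/JSStringLike are invented stand-ins for the real
// JSString/UString layout referenced by the OBJECT_OFFSETOF loads below:

#include <climits>

struct RepLike { unsigned len; };
struct JSStringLike { const void* vptr; RepLike* rep; };
const void* jsStringVPtr = nullptr; // would be the real JSString vtable pointer

// Returns -1 to mean "bail out to the string failure-case stub".
int stringLengthFastPath(const JSStringLike* cell)
{
    if (!cell || cell->vptr != jsStringVPtr) // cell + vptr = the failure-case guards
        return -1;
    unsigned len = cell->rep->len;           // load m_value.m_rep, then len
    if (len > static_cast<unsigned>(INT_MAX))
        return -1;                           // length must fit an immediate int
    return static_cast<int>(len);
}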
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSString, m_value) + OBJECT_OFFSETOF(UString, m_rep)), regT0); - load32(Address(regT0, OBJECT_OFFSETOF(UString::Rep, len)), regT0); - - Jump string_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt)); - - // regT0 contains a 64 bit value (is positive, is zero extended) so we don't need sign extend here. - emitFastArithIntToImmNoCheck(regT0, regT0); - - ret(); -#endif - - // (3) Trampolines for the slow cases of op_call / op_call_eval / op_construct. - COMPILE_ASSERT(sizeof(CodeType) == 4, CodeTypeEnumMustBe32Bit); - - Label virtualCallPreLinkBegin = align(); - - // Load the callee CodeBlock* into eax - loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3); - loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT0); - Jump hasCodeBlock1 = branchTestPtr(NonZero, regT0); - preserveReturnAddressAfterCall(regT3); - restoreArgumentReference(); - Call callJSFunction1 = call(); - emitGetJITStubArg(1, regT2); - emitGetJITStubArg(3, regT1); - restoreReturnAddressBeforeReturn(regT3); - hasCodeBlock1.link(this); - - Jump isNativeFunc1 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode)); - - // Check argCount matches callee arity. - Jump arityCheckOkay1 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1); - preserveReturnAddressAfterCall(regT3); - emitPutJITStubArg(regT3, 2); - emitPutJITStubArg(regT0, 4); - restoreArgumentReference(); - Call callArityCheck1 = call(); - move(regT1, callFrameRegister); - emitGetJITStubArg(1, regT2); - emitGetJITStubArg(3, regT1); - restoreReturnAddressBeforeReturn(regT3); - arityCheckOkay1.link(this); - isNativeFunc1.link(this); - - compileOpCallInitializeCallFrame(); - - preserveReturnAddressAfterCall(regT3); - emitPutJITStubArg(regT3, 2); - restoreArgumentReference(); - Call callDontLazyLinkCall = call(); - emitGetJITStubArg(1, regT2); - restoreReturnAddressBeforeReturn(regT3); - - jump(regT0); - - Label virtualCallLinkBegin = align(); - - // Load the callee CodeBlock* into eax - loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3); - loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT0); - Jump hasCodeBlock2 = branchTestPtr(NonZero, regT0); - preserveReturnAddressAfterCall(regT3); - restoreArgumentReference(); - Call callJSFunction2 = call(); - emitGetJITStubArg(1, regT2); - emitGetJITStubArg(3, regT1); - restoreReturnAddressBeforeReturn(regT3); - hasCodeBlock2.link(this); - - Jump isNativeFunc2 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode)); - - // Check argCount matches callee arity. 
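// Illustrative sketch (CodeBlockLike and arityCheckStub are invented): the
// arity branch below enters the callee directly when the counts match and
// otherwise detours through cti_op_call_arityCheck to fix the frame up:

struct CodeBlockLike { int numParameters; };
using CallFrame = void*;

// Stand-in for cti_op_call_arityCheck; the real stub may relocate the frame.
CallFrame arityCheckStub(CallFrame frame, int /*argCount*/) { return frame; }

CallFrame enterCallee(const CodeBlockLike& cb, CallFrame frame, int argCount)
{
    if (cb.numParameters != argCount)            // branch32(Equal, ...) inverted
        frame = arityCheckStub(frame, argCount);
    return frame;                                // then move(regT1, callFrameRegister)
}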
- Jump arityCheckOkay2 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
- preserveReturnAddressAfterCall(regT3);
- emitPutJITStubArg(regT3, 2);
- emitPutJITStubArg(regT0, 4);
- restoreArgumentReference();
- Call callArityCheck2 = call();
- move(regT1, callFrameRegister);
- emitGetJITStubArg(1, regT2);
- emitGetJITStubArg(3, regT1);
- restoreReturnAddressBeforeReturn(regT3);
- arityCheckOkay2.link(this);
- isNativeFunc2.link(this);
-
- compileOpCallInitializeCallFrame();
-
- preserveReturnAddressAfterCall(regT3);
- emitPutJITStubArg(regT3, 2);
- restoreArgumentReference();
- Call callLazyLinkCall = call();
- restoreReturnAddressBeforeReturn(regT3);
-
- jump(regT0);
-
- Label virtualCallBegin = align();
-
- // Load the callee CodeBlock* into eax
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3);
- loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT0);
- Jump hasCodeBlock3 = branchTestPtr(NonZero, regT0);
- preserveReturnAddressAfterCall(regT3);
- restoreArgumentReference();
- Call callJSFunction3 = call();
- emitGetJITStubArg(1, regT2);
- emitGetJITStubArg(3, regT1);
- restoreReturnAddressBeforeReturn(regT3);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3); // reload the function body node, so we can reload the code pointer.
- hasCodeBlock3.link(this);
-
- Jump isNativeFunc3 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode));
-
- // Check argCount matches callee arity.
- Jump arityCheckOkay3 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
- preserveReturnAddressAfterCall(regT3);
- emitPutJITStubArg(regT3, 2);
- emitPutJITStubArg(regT0, 4);
- restoreArgumentReference();
- Call callArityCheck3 = call();
- move(regT1, callFrameRegister);
- emitGetJITStubArg(1, regT2);
- emitGetJITStubArg(3, regT1);
- restoreReturnAddressBeforeReturn(regT3);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3); // reload the function body node, so we can reload the code pointer.
- arityCheckOkay3.link(this);
- isNativeFunc3.link(this);
-
- // load ctiCode from the new codeBlock.
- loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_jitCode)), regT0);
-
- compileOpCallInitializeCallFrame();
- jump(regT0);
-
-
- Label nativeCallThunk = align();
- preserveReturnAddressAfterCall(regT0);
- emitPutToCallFrameHeader(regT0, RegisterFile::ReturnPC); // Push return address
-
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT1); - emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT1); - emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain); - - -#if PLATFORM(X86_64) - emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, X86::ecx); - - // Allocate stack space for our arglist - subPtr(Imm32(sizeof(ArgList)), stackPointerRegister); - COMPILE_ASSERT((sizeof(ArgList) & 0xf) == 0, ArgList_should_by_16byte_aligned); - - // Set up arguments - subPtr(Imm32(1), X86::ecx); // Don't include 'this' in argcount - - // Push argcount - storePtr(X86::ecx, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_argCount))); - - // Calculate the start of the callframe header, and store in edx - addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int32_t)sizeof(Register)), callFrameRegister, X86::edx); - - // Calculate start of arguments as callframe header - sizeof(Register) * argcount (ecx) - mul32(Imm32(sizeof(Register)), X86::ecx, X86::ecx); - subPtr(X86::ecx, X86::edx); - - // push pointer to arguments - storePtr(X86::edx, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_args))); - - // ArgList is passed by reference so is stackPointerRegister - move(stackPointerRegister, X86::ecx); - - // edx currently points to the first argument, edx-sizeof(Register) points to 'this' - loadPtr(Address(X86::edx, -(int32_t)sizeof(Register)), X86::edx); - - emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::esi); - - move(callFrameRegister, X86::edi); - - call(Address(X86::esi, OBJECT_OFFSETOF(JSFunction, m_data))); - - addPtr(Imm32(sizeof(ArgList)), stackPointerRegister); -#elif PLATFORM(X86) - emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0); - - /* We have two structs that we use to describe the stackframe we set up for our - * call to native code. NativeCallFrameStructure describes the how we set up the stack - * in advance of the call. NativeFunctionCalleeSignature describes the callframe - * as the native code expects it. We do this as we are using the fastcall calling - * convention which results in the callee popping its arguments off the stack, but - * not the rest of the callframe so we need a nice way to ensure we increment the - * stack pointer by the right amount after the call. 
- */ -#if COMPILER(MSVC) || PLATFORM(LINUX) - struct NativeCallFrameStructure { - // CallFrame* callFrame; // passed in EDX - JSObject* callee; - JSValue thisValue; - ArgList* argPointer; - ArgList args; - JSValue result; - }; - struct NativeFunctionCalleeSignature { - JSObject* callee; - JSValue thisValue; - ArgList* argPointer; - }; -#else - struct NativeCallFrameStructure { - // CallFrame* callFrame; // passed in ECX - // JSObject* callee; // passed in EDX - JSValue thisValue; - ArgList* argPointer; - ArgList args; - }; - struct NativeFunctionCalleeSignature { - JSValue thisValue; - ArgList* argPointer; - }; -#endif - const int NativeCallFrameSize = (sizeof(NativeCallFrameStructure) + 15) & ~15; - // Allocate system stack frame - subPtr(Imm32(NativeCallFrameSize), stackPointerRegister); - - // Set up arguments - subPtr(Imm32(1), regT0); // Don't include 'this' in argcount - - // push argcount - storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_argCount))); - - // Calculate the start of the callframe header, and store in regT1 - addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int)sizeof(Register)), callFrameRegister, regT1); - - // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT0) - mul32(Imm32(sizeof(Register)), regT0, regT0); - subPtr(regT0, regT1); - storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_args))); - - // ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register) - addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, args)), stackPointerRegister, regT0); - storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, argPointer))); - - // regT1 currently points to the first argument, regT1 - sizeof(Register) points to 'this' - loadPtr(Address(regT1, -(int)sizeof(Register)), regT1); - storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue))); - -#if COMPILER(MSVC) || PLATFORM(LINUX) - // ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register) - addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, result)), stackPointerRegister, X86::ecx); - - // Plant callee - emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::eax); - storePtr(X86::eax, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, callee))); - - // Plant callframe - move(callFrameRegister, X86::edx); - - call(Address(X86::eax, OBJECT_OFFSETOF(JSFunction, m_data))); - - // JSValue is a non-POD type - loadPtr(Address(X86::eax), X86::eax); -#else - // Plant callee - emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::edx); - - // Plant callframe - move(callFrameRegister, X86::ecx); - call(Address(X86::edx, OBJECT_OFFSETOF(JSFunction, m_data))); -#endif - - // We've put a few temporaries on the stack in addition to the actual arguments - // so pull them off now - addPtr(Imm32(NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)), stackPointerRegister); - -#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL) -#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform." -#else - breakpoint(); -#endif - - // Check for an exception - loadPtr(&(globalData->exception), regT2); - Jump exceptionHandler = branchTestPtr(NonZero, regT2); - - // Grab the return address. - emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1); - - // Restore our caller's "r". 
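// Illustrative sketch of the thunk's tail (GlobalDataLike is invented): test
// the VM-wide exception slot loaded above, then either return normally or
// reroute the return address into ctiVMThrowTrampoline:

enum class NativeCallOutcome { Return, Throw };

struct GlobalDataLike { void* exception; void* exceptionLocation; };

NativeCallOutcome finishNativeCall(GlobalDataLike& vm, void* returnPC)
{
    if (!vm.exception)
        return NativeCallOutcome::Return; // restore the caller frame and ret
    vm.exceptionLocation = returnPC;      // record where the throw happened,
    return NativeCallOutcome::Throw;      // then "return" into the throw trampoline
}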
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister); - - // Return. - restoreReturnAddressBeforeReturn(regT1); - ret(); - - // Handle an exception - exceptionHandler.link(this); - // Grab the return address. - emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1); - move(ImmPtr(&globalData->exceptionLocation), regT2); - storePtr(regT1, regT2); - move(ImmPtr(reinterpret_cast(ctiVMThrowTrampoline)), regT2); - emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister); - poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*)); - restoreReturnAddressBeforeReturn(regT2); - ret(); - - -#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) - Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1); - Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2); - Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3); -#endif - - // All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object. - LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size())); - -#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) - patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail)); - patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail)); - patchBuffer.link(string_failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail)); -#endif - patchBuffer.link(callArityCheck1, FunctionPtr(cti_op_call_arityCheck)); - patchBuffer.link(callArityCheck2, FunctionPtr(cti_op_call_arityCheck)); - patchBuffer.link(callArityCheck3, FunctionPtr(cti_op_call_arityCheck)); - patchBuffer.link(callJSFunction1, FunctionPtr(cti_op_call_JSFunction)); - patchBuffer.link(callJSFunction2, FunctionPtr(cti_op_call_JSFunction)); - patchBuffer.link(callJSFunction3, FunctionPtr(cti_op_call_JSFunction)); - patchBuffer.link(callDontLazyLinkCall, FunctionPtr(cti_vm_dontLazyLinkCall)); - patchBuffer.link(callLazyLinkCall, FunctionPtr(cti_vm_lazyLinkCall)); - - CodeRef finalCode = patchBuffer.finalizeCode(); - *executablePool = finalCode.m_executablePool; - - *ctiVirtualCallPreLink = trampolineAt(finalCode, virtualCallPreLinkBegin); - *ctiVirtualCallLink = trampolineAt(finalCode, virtualCallLinkBegin); - *ctiVirtualCall = trampolineAt(finalCode, virtualCallBegin); - *ctiNativeCallThunk = trampolineAt(finalCode, nativeCallThunk); -#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) - *ctiStringLengthTrampoline = trampolineAt(finalCode, stringLengthBegin); -#else - UNUSED_PARAM(ctiStringLengthTrampoline); -#endif -} - -void JIT::emit_op_mov(Instruction* currentInstruction) -{ - int dst = currentInstruction[1].u.operand; - int src = currentInstruction[2].u.operand; - - if (m_codeBlock->isConstantRegisterIndex(src)) { - storePtr(ImmPtr(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register))); - if (dst == m_lastResultBytecodeRegister) - killLastResultRegister(); - } else if ((src == m_lastResultBytecodeRegister) || (dst == m_lastResultBytecodeRegister)) { - // If either the src or dst is the cached register go though - // get/put registers to make sure we track this correctly. - emitGetVirtualRegister(src, regT0); - emitPutVirtualRegister(dst); - } else { - // Perform the copy via regT1; do not disturb any mapping in regT0. 
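// Illustrative sketch of the register cache op_mov must respect: the JIT
// remembers which virtual register currently mirrors regT0
// (m_lastResultBytecodeRegister), so a mov touching that slot has to go
// through the cache to keep the mapping coherent. MiniJIT is invented:

#include <cstdint>

struct MiniJIT {
    int cachedVReg = -1;             // which vreg regT0 mirrors, if any
    uint64_t regT0 = 0;
    uint64_t frame[64] = {};

    void mov(int dst, int src)
    {
        if (src == cachedVReg || dst == cachedVReg) {
            regT0 = frame[src];      // route through the cached register
            frame[dst] = regT0;
            cachedVReg = dst;        // the cache now describes dst
        } else
            frame[dst] = frame[src]; // plain copy via a scratch (regT1 in the
    }                                // real code); the cache is undisturbed
};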
- loadPtr(Address(callFrameRegister, src * sizeof(Register)), regT1); - storePtr(regT1, Address(callFrameRegister, dst * sizeof(Register))); - } -} - -void JIT::emit_op_end(Instruction* currentInstruction) -{ - if (m_codeBlock->needsFullScopeChain()) - JITStubCall(this, cti_op_end).call(); - ASSERT(returnValueRegister != callFrameRegister); - emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister); - restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast(sizeof(Register)))); - ret(); -} - -void JIT::emit_op_jmp(Instruction* currentInstruction) -{ - unsigned target = currentInstruction[1].u.operand; - addJump(jump(), target + 1); - RECORD_JUMP_TARGET(target + 1); -} - -void JIT::emit_op_loop(Instruction* currentInstruction) -{ - emitTimeoutCheck(); - - unsigned target = currentInstruction[1].u.operand; - addJump(jump(), target + 1); -} - -void JIT::emit_op_loop_if_less(Instruction* currentInstruction) -{ - emitTimeoutCheck(); - - unsigned op1 = currentInstruction[1].u.operand; - unsigned op2 = currentInstruction[2].u.operand; - unsigned target = currentInstruction[3].u.operand; - if (isOperandConstantImmediateInt(op2)) { - emitGetVirtualRegister(op1, regT0); - emitJumpSlowCaseIfNotImmediateInteger(regT0); -#if USE(JSVALUE64) - int32_t op2imm = getConstantOperandImmediateInt(op2); -#else - int32_t op2imm = static_cast(JSImmediate::rawValue(getConstantOperand(op2))); -#endif - addJump(branch32(LessThan, regT0, Imm32(op2imm)), target + 3); - } else if (isOperandConstantImmediateInt(op1)) { - emitGetVirtualRegister(op2, regT0); - emitJumpSlowCaseIfNotImmediateInteger(regT0); -#if USE(JSVALUE64) - int32_t op1imm = getConstantOperandImmediateInt(op1); -#else - int32_t op1imm = static_cast(JSImmediate::rawValue(getConstantOperand(op1))); -#endif - addJump(branch32(GreaterThan, regT0, Imm32(op1imm)), target + 3); - } else { - emitGetVirtualRegisters(op1, regT0, op2, regT1); - emitJumpSlowCaseIfNotImmediateInteger(regT0); - emitJumpSlowCaseIfNotImmediateInteger(regT1); - addJump(branch32(LessThan, regT0, regT1), target + 3); - } -} - -void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction) -{ - emitTimeoutCheck(); - - unsigned op1 = currentInstruction[1].u.operand; - unsigned op2 = currentInstruction[2].u.operand; - unsigned target = currentInstruction[3].u.operand; - if (isOperandConstantImmediateInt(op2)) { - emitGetVirtualRegister(op1, regT0); - emitJumpSlowCaseIfNotImmediateInteger(regT0); -#if USE(JSVALUE64) - int32_t op2imm = getConstantOperandImmediateInt(op2); -#else - int32_t op2imm = static_cast(JSImmediate::rawValue(getConstantOperand(op2))); -#endif - addJump(branch32(LessThanOrEqual, regT0, Imm32(op2imm)), target + 3); - } else { - emitGetVirtualRegisters(op1, regT0, op2, regT1); - emitJumpSlowCaseIfNotImmediateInteger(regT0); - emitJumpSlowCaseIfNotImmediateInteger(regT1); - addJump(branch32(LessThanOrEqual, regT0, regT1), target + 3); - } -} - -void JIT::emit_op_new_object(Instruction* currentInstruction) -{ - JITStubCall(this, cti_op_new_object).call(currentInstruction[1].u.operand); -} - -void JIT::emit_op_instanceof(Instruction* currentInstruction) -{ - // Load the operands (baseVal, proto, and value respectively) into registers. - // We use regT0 for baseVal since we will be done with this first, and we can then use it for the result. 
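// Illustrative sketch (CellLike is invented): the loop this op emits below is
// an ordinary prototype-chain walk - hoist the "true" result, then follow
// m_prototype until we hit the target or fall off at null:

struct CellLike { CellLike* prototype; };

bool instanceOfWalk(const CellLike* value, const CellLike* proto)
{
    for (const CellLike* p = value->prototype; p; p = p->prototype) {
        if (p == proto)
            return true; // the isInstance exit
    }
    return false;        // chain ended: overwrite with jsBoolean(false)
}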
- emitGetVirtualRegister(currentInstruction[3].u.operand, regT0); - emitGetVirtualRegister(currentInstruction[4].u.operand, regT1); - emitGetVirtualRegister(currentInstruction[2].u.operand, regT2); - - // Check that baseVal & proto are cells. - emitJumpSlowCaseIfNotJSCell(regT0); - emitJumpSlowCaseIfNotJSCell(regT1); - - // Check that baseVal is an object, that it 'ImplementsHasInstance' but that it does not 'OverridesHasInstance'. - loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT0); - addSlowCase(branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType))); - addSlowCase(branchTest32(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsDefaultHasInstance))); - - // If value is not an Object, return false. - Jump valueIsImmediate = emitJumpIfNotJSCell(regT2); - loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT0); - Jump valueIsNotObject = branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)); - - // Check proto is object. - loadPtr(Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), regT0); - addSlowCase(branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType))); - - // Optimistically load the result true, and start looping. - // Initially, regT1 still contains proto and regT2 still contains value. - // As we loop regT2 will be updated with its prototype, recursively walking the prototype chain. - move(ImmPtr(JSValue::encode(jsBoolean(true))), regT0); - Label loop(this); - - // Load the prototype of the object in regT2. If this is equal to regT1 - WIN! - // Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again. - loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2); - loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2); - Jump isInstance = branchPtr(Equal, regT2, regT1); - branchPtr(NotEqual, regT2, ImmPtr(JSValue::encode(jsNull())), loop); - - // We get here either by dropping out of the loop, or if value was not an Object. Result is false. - valueIsImmediate.link(this); - valueIsNotObject.link(this); - move(ImmPtr(JSValue::encode(jsBoolean(false))), regT0); - - // isInstance jumps right down to here, to skip setting the result to false (it has already set true). - isInstance.link(this); - emitPutVirtualRegister(currentInstruction[1].u.operand); -} - -void JIT::emit_op_new_func(Instruction* currentInstruction) -{ - JITStubCall stubCall(this, cti_op_new_func); - stubCall.addArgument(ImmPtr(m_codeBlock->function(currentInstruction[2].u.operand))); - stubCall.call(currentInstruction[1].u.operand); -} - -void JIT::emit_op_call(Instruction* currentInstruction) -{ - compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++); -} - -void JIT::emit_op_call_eval(Instruction* currentInstruction) -{ - compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex++); -} - -void JIT::emit_op_load_varargs(Instruction* currentInstruction) -{ - int argCountDst = currentInstruction[1].u.operand; - int argsOffset = currentInstruction[2].u.operand; - - JITStubCall stubCall(this, cti_op_load_varargs); - stubCall.addArgument(Imm32(argsOffset)); - stubCall.call(); - // Stores a naked int32 in the register file. 
- store32(returnValueRegister, Address(callFrameRegister, argCountDst * sizeof(Register))); -} - -void JIT::emit_op_call_varargs(Instruction* currentInstruction) -{ - compileOpCallVarargs(currentInstruction); -} - -void JIT::emit_op_construct(Instruction* currentInstruction) -{ - compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++); -} - -void JIT::emit_op_get_global_var(Instruction* currentInstruction) -{ - JSVariableObject* globalObject = static_cast(currentInstruction[2].u.jsCell); - move(ImmPtr(globalObject), regT0); - emitGetVariableObjectRegister(regT0, currentInstruction[3].u.operand, regT0); - emitPutVirtualRegister(currentInstruction[1].u.operand); -} - -void JIT::emit_op_put_global_var(Instruction* currentInstruction) -{ - emitGetVirtualRegister(currentInstruction[3].u.operand, regT1); - JSVariableObject* globalObject = static_cast(currentInstruction[1].u.jsCell); - move(ImmPtr(globalObject), regT0); - emitPutVariableObjectRegister(regT1, regT0, currentInstruction[2].u.operand); -} - -void JIT::emit_op_get_scoped_var(Instruction* currentInstruction) -{ - int skip = currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain(); - - emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0); - while (skip--) - loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0); - - loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, object)), regT0); - emitGetVariableObjectRegister(regT0, currentInstruction[2].u.operand, regT0); - emitPutVirtualRegister(currentInstruction[1].u.operand); -} - -void JIT::emit_op_put_scoped_var(Instruction* currentInstruction) -{ - int skip = currentInstruction[2].u.operand + m_codeBlock->needsFullScopeChain(); - - emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1); - emitGetVirtualRegister(currentInstruction[3].u.operand, regT0); - while (skip--) - loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, next)), regT1); - - loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, object)), regT1); - emitPutVariableObjectRegister(regT0, regT1, currentInstruction[1].u.operand); -} - -void JIT::emit_op_tear_off_activation(Instruction* currentInstruction) -{ - JITStubCall stubCall(this, cti_op_tear_off_activation); - stubCall.addArgument(currentInstruction[1].u.operand, regT2); - stubCall.call(); -} - -void JIT::emit_op_tear_off_arguments(Instruction*) -{ - JITStubCall(this, cti_op_tear_off_arguments).call(); -} - -void JIT::emit_op_ret(Instruction* currentInstruction) -{ - // We could JIT generate the deref, only calling out to C when the refcount hits zero. - if (m_codeBlock->needsFullScopeChain()) - JITStubCall(this, cti_op_ret_scopeChain).call(); - - ASSERT(callFrameRegister != regT1); - ASSERT(regT1 != returnValueRegister); - ASSERT(returnValueRegister != callFrameRegister); - - // Return the result in %eax. - emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister); - - // Grab the return address. - emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1); - - // Restore our caller's "r". - emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister); - - // Return. 
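// Illustrative sketch of the return protocol (FrameHeader fields are invented;
// the real offsets come from the RegisterFile enum used above): grab the saved
// ReturnPC, rewind to the caller's frame, then ret:

struct FrameHeader { void* returnPC; FrameHeader* callerFrame; };

void* doReturn(FrameHeader** frame)
{
    void* resumePC = (*frame)->returnPC; // emitGetFromCallFrameHeaderPtr(ReturnPC)
    *frame = (*frame)->callerFrame;      // emitGetFromCallFrameHeaderPtr(CallerFrame)
    return resumePC;                     // restoreReturnAddressBeforeReturn + ret
}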
- restoreReturnAddressBeforeReturn(regT1); - ret(); -} - -void JIT::emit_op_new_array(Instruction* currentInstruction) -{ - JITStubCall stubCall(this, cti_op_new_array); - stubCall.addArgument(Imm32(currentInstruction[2].u.operand)); - stubCall.addArgument(Imm32(currentInstruction[3].u.operand)); - stubCall.call(currentInstruction[1].u.operand); -} - -void JIT::emit_op_resolve(Instruction* currentInstruction) -{ - JITStubCall stubCall(this, cti_op_resolve); - stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand))); - stubCall.call(currentInstruction[1].u.operand); -} - -void JIT::emit_op_construct_verify(Instruction* currentInstruction) -{ - emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); - - emitJumpSlowCaseIfNotJSCell(regT0); - loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2); - addSlowCase(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType))); - -} - -void JIT::emit_op_to_primitive(Instruction* currentInstruction) -{ - int dst = currentInstruction[1].u.operand; - int src = currentInstruction[2].u.operand; - - emitGetVirtualRegister(src, regT0); - - Jump isImm = emitJumpIfNotJSCell(regT0); - addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr))); - isImm.link(this); - - if (dst != src) - emitPutVirtualRegister(dst); - -} - -void JIT::emit_op_strcat(Instruction* currentInstruction) -{ - JITStubCall stubCall(this, cti_op_strcat); - stubCall.addArgument(Imm32(currentInstruction[2].u.operand)); - stubCall.addArgument(Imm32(currentInstruction[3].u.operand)); - stubCall.call(currentInstruction[1].u.operand); -} - -void JIT::emit_op_loop_if_true(Instruction* currentInstruction) -{ - emitTimeoutCheck(); - - unsigned target = currentInstruction[2].u.operand; - emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); - - Jump isZero = branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))); - addJump(emitJumpIfImmediateInteger(regT0), target + 2); - - addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))), target + 2); - addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(false))))); - - isZero.link(this); -}; -void JIT::emit_op_resolve_base(Instruction* currentInstruction) -{ - JITStubCall stubCall(this, cti_op_resolve_base); - stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand))); - stubCall.call(currentInstruction[1].u.operand); -} - -void JIT::emit_op_resolve_skip(Instruction* currentInstruction) -{ - JITStubCall stubCall(this, cti_op_resolve_skip); - stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand))); - stubCall.addArgument(Imm32(currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain())); - stubCall.call(currentInstruction[1].u.operand); -} - -void JIT::emit_op_resolve_global(Instruction* currentInstruction) -{ - // Fast case - void* globalObject = currentInstruction[2].u.jsCell; - Identifier* ident = &m_codeBlock->identifier(currentInstruction[3].u.operand); - - unsigned currentIndex = m_globalResolveInfoIndex++; - void* structureAddress = &(m_codeBlock->globalResolveInfo(currentIndex).structure); - void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset); - - // Check Structure of global object - move(ImmPtr(globalObject), regT0); - loadPtr(structureAddress, regT1); - Jump noMatch = branchPtr(NotEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSCell, 
m_structure))); // Structures don't match - - // Load cached property - // Assume that the global object always uses external storage. - loadPtr(Address(regT0, OBJECT_OFFSETOF(JSGlobalObject, m_externalStorage)), regT0); - load32(offsetAddr, regT1); - loadPtr(BaseIndex(regT0, regT1, ScalePtr), regT0); - emitPutVirtualRegister(currentInstruction[1].u.operand); - Jump end = jump(); - - // Slow case - noMatch.link(this); - JITStubCall stubCall(this, cti_op_resolve_global); - stubCall.addArgument(ImmPtr(globalObject)); - stubCall.addArgument(ImmPtr(ident)); - stubCall.addArgument(Imm32(currentIndex)); - stubCall.call(currentInstruction[1].u.operand); - end.link(this); -} - -void JIT::emit_op_not(Instruction* currentInstruction) -{ - emitGetVirtualRegister(currentInstruction[2].u.operand, regT0); - xorPtr(Imm32(static_cast(JSImmediate::FullTagTypeBool)), regT0); - addSlowCase(branchTestPtr(NonZero, regT0, Imm32(static_cast(~JSImmediate::ExtendedPayloadBitBoolValue)))); - xorPtr(Imm32(static_cast(JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue)), regT0); - emitPutVirtualRegister(currentInstruction[1].u.operand); -} + emitPutVirtualRegister(currentInstruction[1].u.operand); +} void JIT::emit_op_jfalse(Instruction* currentInstruction) { unsigned target = currentInstruction[2].u.operand; emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); - addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))), target + 2); + addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNumber(0)))), target); Jump isNonZero = emitJumpIfImmediateInteger(regT0); - addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))), target + 2); - addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(true))))); + addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsBoolean(false)))), target); + addSlowCase(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsBoolean(true))))); isNonZero.link(this); - RECORD_JUMP_TARGET(target + 2); -}; +} + void JIT::emit_op_jeq_null(Instruction* currentInstruction) { unsigned src = currentInstruction[1].u.operand; @@ -2287,17 +403,19 @@ void JIT::emit_op_jeq_null(Instruction* currentInstruction) Jump isImmediate = emitJumpIfNotJSCell(regT0); // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure. 
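// Illustrative sketch of the semantics being compiled here (SimpleStructure and
// SimpleCell are invented): a cell compares equal to null only when its
// structure carries MasqueradesAsUndefined - and, with this patch, only when
// the cell belongs to the observing global object - while an immediate matches
// if it is null or undefined (one tag bit apart):

#include <cstdint>

constexpr uint8_t MasqueradesFlag = 1; // placeholder bit for MasqueradesAsUndefined

struct SimpleStructure { uint8_t typeInfoFlags; const void* globalObject; };
struct SimpleCell { SimpleStructure* structure; };

bool cellEqualsNull(const SimpleCell& cell, const void* observingGlobalObject)
{
    if (!(cell.structure->typeInfoFlags & MasqueradesFlag))
        return false;                             // ordinary object: != null
    return cell.structure->globalObject == observingGlobalObject;
}

bool immediateEqualsNull(uint64_t bits, uint64_t tagBitUndefined, uint64_t nullBits)
{
    return (bits & ~tagBitUndefined) == nullBits; // folds undefined onto null
}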
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2); - addJump(branchTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2); - Jump wasNotImmediate = jump(); + loadPtr(Address(regT0, JSCell::structureOffset()), regT2); + Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); + move(TrustedImmPtr(m_codeBlock->globalObject()), regT0); + addJump(branchPtr(Equal, Address(regT2, Structure::globalObjectOffset()), regT0), target); + Jump masqueradesGlobalObjectIsForeign = jump(); // Now handle the immediate cases - undefined & null isImmediate.link(this); - andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0); - addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNull()))), target + 2); + and64(TrustedImm32(~TagBitUndefined), regT0); + addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNull()))), target); - wasNotImmediate.link(this); - RECORD_JUMP_TARGET(target + 2); + isNotMasqueradesAsUndefined.link(this); + masqueradesGlobalObjectIsForeign.link(this); }; void JIT::emit_op_jneq_null(Instruction* currentInstruction) { @@ -2308,105 +426,58 @@ void JIT::emit_op_jneq_null(Instruction* currentInstruction) Jump isImmediate = emitJumpIfNotJSCell(regT0); // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure. - loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2); - addJump(branchTest32(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2); + loadPtr(Address(regT0, JSCell::structureOffset()), regT2); + addJump(branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target); + move(TrustedImmPtr(m_codeBlock->globalObject()), regT0); + addJump(branchPtr(NotEqual, Address(regT2, Structure::globalObjectOffset()), regT0), target); Jump wasNotImmediate = jump(); // Now handle the immediate cases - undefined & null isImmediate.link(this); - andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0); - addJump(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsNull()))), target + 2); + and64(TrustedImm32(~TagBitUndefined), regT0); + addJump(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsNull()))), target); wasNotImmediate.link(this); - RECORD_JUMP_TARGET(target + 2); } void JIT::emit_op_jneq_ptr(Instruction* currentInstruction) { unsigned src = currentInstruction[1].u.operand; - JSCell* ptr = currentInstruction[2].u.jsCell; + Special::Pointer ptr = currentInstruction[2].u.specialPointer; unsigned target = currentInstruction[3].u.operand; emitGetVirtualRegister(src, regT0); - addJump(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(JSValue(ptr)))), target + 3); - - RECORD_JUMP_TARGET(target + 3); -} - -void JIT::emit_op_jsr(Instruction* currentInstruction) -{ - int retAddrDst = currentInstruction[1].u.operand; - int target = currentInstruction[2].u.operand; - DataLabelPtr storeLocation = storePtrWithPatch(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst)); - addJump(jump(), target + 2); - m_jsrSites.append(JSRInfo(storeLocation, label())); - killLastResultRegister(); - RECORD_JUMP_TARGET(target + 2); -} - -void JIT::emit_op_sret(Instruction* currentInstruction) -{ - jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand)); - killLastResultRegister(); + addJump(branchPtr(NotEqual, regT0, 
TrustedImmPtr(actualPointerFor(m_codeBlock, ptr))), target); } void JIT::emit_op_eq(Instruction* currentInstruction) { emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1); emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2); - set32(Equal, regT1, regT0, regT0); + compare32(Equal, regT1, regT0, regT0); emitTagAsBoolImmediate(regT0); emitPutVirtualRegister(currentInstruction[1].u.operand); } -void JIT::emit_op_bitnot(Instruction* currentInstruction) -{ - emitGetVirtualRegister(currentInstruction[2].u.operand, regT0); - emitJumpSlowCaseIfNotImmediateInteger(regT0); -#if USE(JSVALUE64) - not32(regT0); - emitFastArithIntToImmNoCheck(regT0, regT0); -#else - xorPtr(Imm32(~JSImmediate::TagTypeNumber), regT0); -#endif - emitPutVirtualRegister(currentInstruction[1].u.operand); -} - -void JIT::emit_op_resolve_with_base(Instruction* currentInstruction) -{ - JITStubCall stubCall(this, cti_op_resolve_with_base); - stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand))); - stubCall.addArgument(Imm32(currentInstruction[1].u.operand)); - stubCall.call(currentInstruction[2].u.operand); -} - -void JIT::emit_op_new_func_exp(Instruction* currentInstruction) -{ - JITStubCall stubCall(this, cti_op_new_func_exp); - stubCall.addArgument(ImmPtr(m_codeBlock->functionExpression(currentInstruction[2].u.operand))); - stubCall.call(currentInstruction[1].u.operand); -} - void JIT::emit_op_jtrue(Instruction* currentInstruction) { unsigned target = currentInstruction[2].u.operand; emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); - Jump isZero = branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))); - addJump(emitJumpIfImmediateInteger(regT0), target + 2); + Jump isZero = branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNumber(0)))); + addJump(emitJumpIfImmediateInteger(regT0), target); - addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))), target + 2); - addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(false))))); + addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsBoolean(true)))), target); + addSlowCase(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsBoolean(false))))); isZero.link(this); - RECORD_JUMP_TARGET(target + 2); } void JIT::emit_op_neq(Instruction* currentInstruction) { emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1); emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2); - set32(NotEqual, regT1, regT0, regT0); + compare32(NotEqual, regT1, regT0, regT0); emitTagAsBoolImmediate(regT0); emitPutVirtualRegister(currentInstruction[1].u.operand); @@ -2417,23 +488,16 @@ void JIT::emit_op_bitxor(Instruction* currentInstruction) { emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1); emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2); - xorPtr(regT1, regT0); + xor64(regT1, regT0); emitFastArithReTagImmediate(regT0, regT0); emitPutVirtualRegister(currentInstruction[1].u.operand); } -void JIT::emit_op_new_regexp(Instruction* currentInstruction) -{ - JITStubCall stubCall(this, cti_op_new_regexp); - stubCall.addArgument(ImmPtr(m_codeBlock->regexp(currentInstruction[2].u.operand))); - stubCall.call(currentInstruction[1].u.operand); -} - void JIT::emit_op_bitor(Instruction* currentInstruction) { emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1); 
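// Illustrative note on why op_bitxor above re-tags but op_bitor below does
// not: under JSVALUE64 NaN-boxing an int32 is encoded as TagTypeNumber | value,
// with the tag in the high bits. OR-ing two boxed ints preserves the tag;
// XOR-ing them cancels it, so it must be re-applied (emitFastArithReTagImmediate):

#include <cstdint>

constexpr uint64_t TagTypeNumber = 0xFFFF000000000000ull;

uint64_t boxInt32(int32_t i) { return TagTypeNumber | static_cast<uint32_t>(i); }

uint64_t boxedOr(uint64_t a, uint64_t b)  { return a | b; }                   // tag survives
uint64_t boxedXor(uint64_t a, uint64_t b) { return TagTypeNumber | (a ^ b); } // tag restored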
emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2); - orPtr(regT1, regT0); + or64(regT1, regT0); emitPutVirtualRegister(currentInstruction[1].u.operand); } @@ -2450,22 +514,120 @@ void JIT::emit_op_throw(Instruction* currentInstruction) #endif } +void JIT::emit_op_get_pnames(Instruction* currentInstruction) +{ + int dst = currentInstruction[1].u.operand; + int base = currentInstruction[2].u.operand; + int i = currentInstruction[3].u.operand; + int size = currentInstruction[4].u.operand; + int breakTarget = currentInstruction[5].u.operand; + + JumpList isNotObject; + + emitGetVirtualRegister(base, regT0); + if (!m_codeBlock->isKnownNotImmediate(base)) + isNotObject.append(emitJumpIfNotJSCell(regT0)); + if (base != m_codeBlock->thisRegister() || m_codeBlock->isStrictMode()) { + loadPtr(Address(regT0, JSCell::structureOffset()), regT2); + isNotObject.append(emitJumpIfNotObject(regT2)); + } + + // We could inline the case where you have a valid cache, but + // this call doesn't seem to be hot. + Label isObject(this); + JITStubCall getPnamesStubCall(this, cti_op_get_pnames); + getPnamesStubCall.addArgument(regT0); + getPnamesStubCall.call(dst); + load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3); + store64(tagTypeNumberRegister, addressFor(i)); + store32(TrustedImm32(Int32Tag), intTagFor(size)); + store32(regT3, intPayloadFor(size)); + Jump end = jump(); + + isNotObject.link(this); + move(regT0, regT1); + and32(TrustedImm32(~TagBitUndefined), regT1); + addJump(branch32(Equal, regT1, TrustedImm32(ValueNull)), breakTarget); + + JITStubCall toObjectStubCall(this, cti_to_object); + toObjectStubCall.addArgument(regT0); + toObjectStubCall.call(base); + jump().linkTo(isObject, this); + + end.link(this); +} + void JIT::emit_op_next_pname(Instruction* currentInstruction) { - JITStubCall stubCall(this, cti_op_next_pname); - stubCall.addArgument(currentInstruction[2].u.operand, regT2); + int dst = currentInstruction[1].u.operand; + int base = currentInstruction[2].u.operand; + int i = currentInstruction[3].u.operand; + int size = currentInstruction[4].u.operand; + int it = currentInstruction[5].u.operand; + int target = currentInstruction[6].u.operand; + + JumpList callHasProperty; + + Label begin(this); + load32(intPayloadFor(i), regT0); + Jump end = branch32(Equal, regT0, intPayloadFor(size)); + + // Grab key @ i + loadPtr(addressFor(it), regT1); + loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2); + + load64(BaseIndex(regT2, regT0, TimesEight), regT2); + + emitPutVirtualRegister(dst, regT2); + + // Increment i + add32(TrustedImm32(1), regT0); + store32(regT0, intPayloadFor(i)); + + // Verify that i is valid: + emitGetVirtualRegister(base, regT0); + + // Test base's structure + loadPtr(Address(regT0, JSCell::structureOffset()), regT2); + callHasProperty.append(branchPtr(NotEqual, regT2, Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))))); + + // Test base's prototype chain + loadPtr(Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedPrototypeChain))), regT3); + loadPtr(Address(regT3, OBJECT_OFFSETOF(StructureChain, m_vector)), regT3); + addJump(branchTestPtr(Zero, Address(regT3)), target); + + Label checkPrototype(this); + load64(Address(regT2, Structure::prototypeOffset()), regT2); + callHasProperty.append(emitJumpIfNotJSCell(regT2)); + loadPtr(Address(regT2, JSCell::structureOffset()), regT2); + callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3))); + 
addPtr(TrustedImm32(sizeof(Structure*)), regT3); + branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this); + + // Continue loop. + addJump(jump(), target); + + // Slow case: Ask the object if i is valid. + callHasProperty.link(this); + emitGetVirtualRegister(dst, regT1); + JITStubCall stubCall(this, cti_has_property); + stubCall.addArgument(regT0); + stubCall.addArgument(regT1); stubCall.call(); - Jump endOfIter = branchTestPtr(Zero, regT0); - emitPutVirtualRegister(currentInstruction[1].u.operand); - addJump(jump(), currentInstruction[3].u.operand + 3); - endOfIter.link(this); + + // Test for valid key. + addJump(branchTest32(NonZero, regT0), target); + jump().linkTo(begin, this); + + // End of loop. + end.link(this); } -void JIT::emit_op_push_scope(Instruction* currentInstruction) +void JIT::emit_op_push_with_scope(Instruction* currentInstruction) { - JITStubCall stubCall(this, cti_op_push_scope); + JITStubCall stubCall(this, cti_op_push_with_scope); stubCall.addArgument(currentInstruction[1].u.operand, regT2); - stubCall.call(currentInstruction[1].u.operand); + stubCall.call(); } void JIT::emit_op_pop_scope(Instruction*) @@ -2480,17 +642,25 @@ void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqTy unsigned src2 = currentInstruction[3].u.operand; emitGetVirtualRegisters(src1, regT0, src2, regT1); - - // Jump to a slow case if either operand is a number, or if both are JSCell*s. + + // Jump slow if both are cells (to cover strings). move(regT0, regT2); - orPtr(regT1, regT2); + or64(regT1, regT2); addSlowCase(emitJumpIfJSCell(regT2)); - addSlowCase(emitJumpIfImmediateNumber(regT2)); + + // Jump slow if either is a double. First test if it's an integer, which is fine, and then test + // if it's a double. + Jump leftOK = emitJumpIfImmediateInteger(regT0); + addSlowCase(emitJumpIfImmediateNumber(regT0)); + leftOK.link(this); + Jump rightOK = emitJumpIfImmediateInteger(regT1); + addSlowCase(emitJumpIfImmediateNumber(regT1)); + rightOK.link(this); if (type == OpStrictEq) - set32(Equal, regT1, regT0, regT0); + compare64(Equal, regT1, regT0, regT0); else - set32(NotEqual, regT1, regT0, regT0); + compare64(NotEqual, regT1, regT0, regT0); emitTagAsBoolImmediate(regT0); emitPutVirtualRegister(dst); @@ -2506,46 +676,35 @@ void JIT::emit_op_nstricteq(Instruction* currentInstruction) compileOpStrictEq(currentInstruction, OpNStrictEq); } -void JIT::emit_op_to_jsnumber(Instruction* currentInstruction) +void JIT::emit_op_to_number(Instruction* currentInstruction) { int srcVReg = currentInstruction[2].u.operand; emitGetVirtualRegister(srcVReg, regT0); - Jump wasImmediate = emitJumpIfImmediateInteger(regT0); - - emitJumpSlowCaseIfNotJSCell(regT0, srcVReg); - loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2); - addSlowCase(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(NumberType))); - - wasImmediate.link(this); + addSlowCase(emitJumpIfNotImmediateNumber(regT0)); emitPutVirtualRegister(currentInstruction[1].u.operand); } -void JIT::emit_op_push_new_scope(Instruction* currentInstruction) +void JIT::emit_op_push_name_scope(Instruction* currentInstruction) { - JITStubCall stubCall(this, cti_op_push_new_scope); - stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand))); - stubCall.addArgument(currentInstruction[3].u.operand, regT2); - stubCall.call(currentInstruction[1].u.operand); + JITStubCall stubCall(this, cti_op_push_name_scope); + 
stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[1].u.operand))); + stubCall.addArgument(currentInstruction[2].u.operand, regT2); + stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand)); + stubCall.call(); } void JIT::emit_op_catch(Instruction* currentInstruction) { killLastResultRegister(); // FIXME: Implicitly treat op_catch as a labeled statement, and remove this line of code. - peek(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*)); + move(regT0, callFrameRegister); + peek(regT3, OBJECT_OFFSETOF(struct JITStackFrame, vm) / sizeof(void*)); + load64(Address(regT3, OBJECT_OFFSETOF(VM, exception)), regT0); + store64(TrustedImm64(JSValue::encode(JSValue())), Address(regT3, OBJECT_OFFSETOF(VM, exception))); emitPutVirtualRegister(currentInstruction[1].u.operand); } -void JIT::emit_op_jmp_scopes(Instruction* currentInstruction) -{ - JITStubCall stubCall(this, cti_op_jmp_scopes); - stubCall.addArgument(Imm32(currentInstruction[1].u.operand)); - stubCall.call(); - addJump(jump(), currentInstruction[2].u.operand + 2); - RECORD_JUMP_TARGET(currentInstruction[2].u.operand + 2); -} - void JIT::emit_op_switch_imm(Instruction* currentInstruction) { unsigned tableIndex = currentInstruction[1].u.operand; @@ -2554,12 +713,12 @@ void JIT::emit_op_switch_imm(Instruction* currentInstruction) // create jump table for switch destinations, track this switch statement. SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex); - m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate)); + m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Immediate)); jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size()); JITStubCall stubCall(this, cti_op_switch_imm); stubCall.addArgument(scrutinee, regT2); - stubCall.addArgument(Imm32(tableIndex)); + stubCall.addArgument(TrustedImm32(tableIndex)); stubCall.call(); jump(regT0); } @@ -2572,12 +731,12 @@ void JIT::emit_op_switch_char(Instruction* currentInstruction) // create jump table for switch destinations, track this switch statement. SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex); - m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Character)); + m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Character)); jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size()); JITStubCall stubCall(this, cti_op_switch_char); stubCall.addArgument(scrutinee, regT2); - stubCall.addArgument(Imm32(tableIndex)); + stubCall.addArgument(TrustedImm32(tableIndex)); stubCall.call(); jump(regT0); } @@ -2590,31 +749,39 @@ void JIT::emit_op_switch_string(Instruction* currentInstruction) // create jump table for switch destinations, track this switch statement. 
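// Illustrative sketch (StringJumpTableLike is invented): all three switch ops
// compile to the same shape - a stub maps the scrutinee to a machine-code
// address out of the table's ctiOffsets (grown to match branchOffsets), and
// the generated code finishes with an indirect jump(regT0):

#include <string>
#include <unordered_map>

using CodePtr = void (*)();

struct StringJumpTableLike {
    std::unordered_map<std::string, CodePtr> ctiOffsets; // case label -> code
    CodePtr ctiDefault;                                  // defaultOffset target
};

CodePtr switchStringStub(const StringJumpTableLike& table, const std::string& scrutinee)
{
    auto it = table.ctiOffsets.find(scrutinee);
    return it != table.ctiOffsets.end() ? it->second : table.ctiDefault;
} // the caller then performs: jump(regT0)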
StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex); - m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset)); + m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset)); JITStubCall stubCall(this, cti_op_switch_string); stubCall.addArgument(scrutinee, regT2); - stubCall.addArgument(Imm32(tableIndex)); + stubCall.addArgument(TrustedImm32(tableIndex)); stubCall.call(); jump(regT0); } -void JIT::emit_op_new_error(Instruction* currentInstruction) +void JIT::emit_op_throw_static_error(Instruction* currentInstruction) { - JITStubCall stubCall(this, cti_op_new_error); - stubCall.addArgument(Imm32(currentInstruction[2].u.operand)); - stubCall.addArgument(ImmPtr(JSValue::encode(m_codeBlock->getConstant(currentInstruction[3].u.operand)))); - stubCall.addArgument(Imm32(m_bytecodeIndex)); - stubCall.call(currentInstruction[1].u.operand); + JITStubCall stubCall(this, cti_op_throw_static_error); + if (!m_codeBlock->getConstant(currentInstruction[1].u.operand).isNumber()) + stubCall.addArgument(TrustedImm64(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand)))); + else + stubCall.addArgument(Imm64(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand)))); + stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand)); + stubCall.call(); } void JIT::emit_op_debug(Instruction* currentInstruction) { +#if ENABLE(DEBUG_WITH_BREAKPOINT) + UNUSED_PARAM(currentInstruction); + breakpoint(); +#else JITStubCall stubCall(this, cti_op_debug); - stubCall.addArgument(Imm32(currentInstruction[1].u.operand)); - stubCall.addArgument(Imm32(currentInstruction[2].u.operand)); - stubCall.addArgument(Imm32(currentInstruction[3].u.operand)); + stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand)); + stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand)); + stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand)); + stubCall.addArgument(TrustedImm32(currentInstruction[4].u.operand)); stubCall.call(); +#endif } void JIT::emit_op_eq_null(Instruction* currentInstruction) @@ -2625,17 +792,24 @@ void JIT::emit_op_eq_null(Instruction* currentInstruction) emitGetVirtualRegister(src1, regT0); Jump isImmediate = emitJumpIfNotJSCell(regT0); - loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2); - setTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0); + loadPtr(Address(regT0, JSCell::structureOffset()), regT2); + Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); + move(TrustedImm32(0), regT0); + Jump wasNotMasqueradesAsUndefined = jump(); + isMasqueradesAsUndefined.link(this); + move(TrustedImmPtr(m_codeBlock->globalObject()), regT0); + loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2); + comparePtr(Equal, regT0, regT2, regT0); Jump wasNotImmediate = jump(); isImmediate.link(this); - andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0); - setPtr(Equal, regT0, Imm32(JSImmediate::FullTagTypeNull), regT0); + and64(TrustedImm32(~TagBitUndefined), regT0); + compare64(Equal, regT0, TrustedImm32(ValueNull), regT0); wasNotImmediate.link(this); + wasNotMasqueradesAsUndefined.link(this); emitTagAsBoolImmediate(regT0); emitPutVirtualRegister(dst); @@ -2650,246 +824,170 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction) emitGetVirtualRegister(src1, regT0); Jump isImmediate = 
     Jump isImmediate = emitJumpIfNotJSCell(regT0);
 
-    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
-    setTest32(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);
+    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+    Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+    move(TrustedImm32(1), regT0);
+    Jump wasNotMasqueradesAsUndefined = jump();
+    isMasqueradesAsUndefined.link(this);
+    move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
+    loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
+    comparePtr(NotEqual, regT0, regT2, regT0);
     Jump wasNotImmediate = jump();
 
     isImmediate.link(this);
 
-    andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
-    setPtr(NotEqual, regT0, Imm32(JSImmediate::FullTagTypeNull), regT0);
+    and64(TrustedImm32(~TagBitUndefined), regT0);
+    compare64(NotEqual, regT0, TrustedImm32(ValueNull), regT0);
 
     wasNotImmediate.link(this);
+    wasNotMasqueradesAsUndefined.link(this);
 
     emitTagAsBoolImmediate(regT0);
     emitPutVirtualRegister(dst);
-
 }
 
 void JIT::emit_op_enter(Instruction*)
 {
+    emitEnterOptimizationCheck();
+
     // Even though CTI doesn't use them, we initialize our constant
     // registers to zap stale pointers, to avoid unnecessarily prolonging
     // object lifetime and increasing GC pressure.
     size_t count = m_codeBlock->m_numVars;
     for (size_t j = 0; j < count; ++j)
         emitInitRegister(j);
-
 }
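The eq_null path above, and the neq_null twin beside it, compute the loose `x == null` test. Only two kinds of values pass it: the null/undefined immediates, and cells whose structure masquerades as undefined within the observing global object. A plain C++ rendering of that decision; the constants match the JSValue64 encoding this patch targets, and the struct is a simplified stand-in for Structure:

    #include <cstdint>

    static const uint64_t TagBitUndefined = 0x8;       // undefined == null | this bit
    static const uint64_t ValueNull = 0x2;
    static const uint32_t MasqueradesAsUndefined = 1;  // type-info flag (illustrative bit value)

    struct GlobalObjectTag;
    struct StructureModel {
        uint32_t typeInfoFlags;
        GlobalObjectTag* globalObject;
    };

    static bool looselyEqualsNull(bool isCell, uint64_t bits, const StructureModel* structure,
        const GlobalObjectTag* lexicalGlobalObject)
    {
        if (isCell) {
            // Cells: only masqueraders created in the same global object count.
            return (structure->typeInfoFlags & MasqueradesAsUndefined)
                && structure->globalObject == lexicalGlobalObject;
        }
        // Immediates: undefined and null differ only in TagBitUndefined, so
        // masking it maps both onto the null encoding (the and64/compare64 pair).
        return (bits & ~TagBitUndefined) == ValueNull;
    }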
 
-void JIT::emit_op_enter_with_activation(Instruction* currentInstruction)
+void JIT::emit_op_create_activation(Instruction* currentInstruction)
 {
-    // Even though CTI doesn't use them, we initialize our constant
-    // registers to zap stale pointers, to avoid unnecessarily prolonging
-    // object lifetime and increasing GC pressure.
-    size_t count = m_codeBlock->m_numVars;
-    for (size_t j = 0; j < count; ++j)
-        emitInitRegister(j);
-
+    unsigned dst = currentInstruction[1].u.operand;
+
+    Jump activationCreated = branchTest64(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
     JITStubCall(this, cti_op_push_activation).call(currentInstruction[1].u.operand);
+    emitPutVirtualRegister(dst);
+    activationCreated.link(this);
 }
 
-void JIT::emit_op_create_arguments(Instruction*)
+void JIT::emit_op_create_arguments(Instruction* currentInstruction)
 {
-    Jump argsCreated = branchTestPtr(NonZero, Address(callFrameRegister, sizeof(Register) * RegisterFile::ArgumentsRegister));
-    if (m_codeBlock->m_numParameters == 1)
-        JITStubCall(this, cti_op_create_arguments_no_params).call();
-    else
-        JITStubCall(this, cti_op_create_arguments).call();
+    unsigned dst = currentInstruction[1].u.operand;
+
+    Jump argsCreated = branchTest64(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
+    JITStubCall(this, cti_op_create_arguments).call();
+    emitPutVirtualRegister(dst);
+    emitPutVirtualRegister(unmodifiedArgumentsRegister(dst));
     argsCreated.link(this);
 }
-
-void JIT::emit_op_init_arguments(Instruction*)
-{
-    storePtr(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * RegisterFile::ArgumentsRegister));
-}
 
-void JIT::emit_op_convert_this(Instruction* currentInstruction)
+void JIT::emit_op_init_lazy_reg(Instruction* currentInstruction)
 {
-    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
-
-    emitJumpSlowCaseIfNotJSCell(regT0);
-    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
-    addSlowCase(branchTest32(NonZero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
+    unsigned dst = currentInstruction[1].u.operand;
+    store64(TrustedImm64((int64_t)0), Address(callFrameRegister, sizeof(Register) * dst));
 }
 
-void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
+void JIT::emit_op_convert_this(Instruction* currentInstruction)
 {
-    peek(regT1, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof (void*));
-    Jump noProfiler = branchTestPtr(Zero, Address(regT1));
-
-    JITStubCall stubCall(this, cti_op_profile_will_call);
-    stubCall.addArgument(currentInstruction[1].u.operand, regT1);
-    stubCall.call();
-    noProfiler.link(this);
+    emitGetVirtualRegister(currentInstruction[1].u.operand, regT1);
+    emitJumpSlowCaseIfNotJSCell(regT1);
+    if (shouldEmitProfiling()) {
+        loadPtr(Address(regT1, JSCell::structureOffset()), regT0);
+        emitValueProfilingSite();
+    }
+    addSlowCase(branchPtr(Equal, Address(regT1, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
 }
 
-void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
+void JIT::emit_op_get_callee(Instruction* currentInstruction)
 {
-    peek(regT1, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof (void*));
-    Jump noProfiler = branchTestPtr(Zero, Address(regT1));
-
-    JITStubCall stubCall(this, cti_op_profile_did_call);
-    stubCall.addArgument(currentInstruction[1].u.operand, regT1);
-    stubCall.call();
-    noProfiler.link(this);
+    unsigned result = currentInstruction[1].u.operand;
+    emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT0);
+    emitValueProfilingSite();
+    emitPutVirtualRegister(result);
 }
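op_create_activation and op_create_arguments above share one idiom: test the destination register and call into the VM only if it is still empty, so the stub runs at most once per frame and branchTest64(NonZero, ...) skips it afterwards. The same control flow in ordinary C++, with createStub() standing in for the cti call:

    #include <cstdint>

    typedef uint64_t EncodedJSValue;   // 0 doubles as the empty marker, as in JSValue()

    EncodedJSValue createStub();       // stands in for cti_op_push_activation and friends

    // Lazy materialization: the slow call happens only on first touch.
    inline EncodedJSValue ensureCreated(EncodedJSValue& frameSlot)
    {
        if (!frameSlot)                // fast path falls through once created
            frameSlot = createStub();
        return frameSlot;
    }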
-
-// Slow cases
-
-void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_op_create_this(Instruction* currentInstruction)
 {
-    linkSlowCase(iter);
-    linkSlowCase(iter);
-    JITStubCall stubCall(this, cti_op_convert_this);
-    stubCall.addArgument(regT0);
-    stubCall.call(currentInstruction[1].u.operand);
-}
+    int callee = currentInstruction[2].u.operand;
+    RegisterID calleeReg = regT0;
+    RegisterID resultReg = regT0;
+    RegisterID allocatorReg = regT1;
+    RegisterID structureReg = regT2;
+    RegisterID scratchReg = regT3;
 
-void JIT::emitSlow_op_construct_verify(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
-    linkSlowCase(iter);
-    linkSlowCase(iter);
-    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
+    emitGetVirtualRegister(callee, calleeReg);
+    loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg);
+    loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg);
+    addSlowCase(branchTestPtr(Zero, allocatorReg));
+
+    emitAllocateJSObject(allocatorReg, structureReg, resultReg, scratchReg);
     emitPutVirtualRegister(currentInstruction[1].u.operand);
 }
 
-void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_create_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    linkSlowCase(iter);
+    linkSlowCase(iter); // doesn't have an allocation profile
+    linkSlowCase(iter); // allocation failed
 
-    JITStubCall stubCall(this, cti_op_to_primitive);
-    stubCall.addArgument(regT0);
+    JITStubCall stubCall(this, cti_op_create_this);
+    stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
     stubCall.call(currentInstruction[1].u.operand);
 }
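The create_this pair above leans on the allocation profile cached on the callee: load the allocator and structure, bump-allocate inline, and bail to the stub when either step cannot proceed, which is exactly what the two linkSlowCase comments mark. A sketch of the same shape; the types and tryBumpAllocate() are simplifications of the real MarkedAllocator interface:

    struct Structure;

    struct BumpAllocator {
        char* cursor;
        char* end;

        void* tryBumpAllocate(unsigned bytes)
        {
            if (static_cast<unsigned>(end - cursor) < bytes)
                return nullptr;        // linear buffer exhausted
            void* cell = cursor;
            cursor += bytes;
            return cell;
        }
    };

    void* createThisSlow();                       // stands in for cti_op_create_this
    void* initializeCell(void*, Structure*);      // header write done by emitAllocateJSObject

    void* createThis(BumpAllocator* allocator, Structure* structure, unsigned size)
    {
        if (!allocator)                // addSlowCase(branchTestPtr(Zero, allocatorReg))
            return createThisSlow();   // "doesn't have an allocation profile"
        void* cell = allocator->tryBumpAllocate(size);
        if (!cell)
            return createThisSlow();   // "allocation failed"
        return initializeCell(cell, structure);
    }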
 
-void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
 {
-    // The slow case that handles accesses to arrays (below) may jump back up to here.
-    Label beginGetByValSlow(this);
-
-    Jump notImm = getSlowCase(iter);
-    linkSlowCase(iter);
-    linkSlowCase(iter);
-    emitFastArithIntToImmNoCheck(regT1, regT1);
-
-    notImm.link(this);
-    JITStubCall stubCall(this, cti_op_get_by_val);
-    stubCall.addArgument(regT0);
-    stubCall.addArgument(regT1);
-    stubCall.call(currentInstruction[1].u.operand);
-    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
-
-    // This is the slow case that handles accesses to arrays above the fast cut-off.
-    // First, check if this is an access to the vector
-    linkSlowCase(iter);
-    branch32(AboveOrEqual, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength)), beginGetByValSlow);
-
-    // okay, missed the fast region, but it is still in the vector. Get the value.
-    loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT2);
-    // Check whether the value loaded is zero; if so we need to return undefined.
-    branchTestPtr(Zero, regT2, beginGetByValSlow);
-    move(regT2, regT0);
-    emitPutVirtualRegister(currentInstruction[1].u.operand, regT0);
+    JITStubCall stubCall(this, cti_op_profile_will_call);
+    stubCall.addArgument(currentInstruction[1].u.operand, regT1);
+    stubCall.call();
 }
 
-void JIT::emitSlow_op_loop_if_less(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
 {
-    unsigned op1 = currentInstruction[1].u.operand;
-    unsigned op2 = currentInstruction[2].u.operand;
-    unsigned target = currentInstruction[3].u.operand;
-    if (isOperandConstantImmediateInt(op2)) {
-        linkSlowCase(iter);
-        JITStubCall stubCall(this, cti_op_loop_if_less);
-        stubCall.addArgument(regT0);
-        stubCall.addArgument(op2, regT2);
-        stubCall.call();
-        emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
-    } else if (isOperandConstantImmediateInt(op1)) {
-        linkSlowCase(iter);
-        JITStubCall stubCall(this, cti_op_loop_if_less);
-        stubCall.addArgument(op1, regT2);
-        stubCall.addArgument(regT0);
-        stubCall.call();
-        emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
-    } else {
-        linkSlowCase(iter);
-        linkSlowCase(iter);
-        JITStubCall stubCall(this, cti_op_loop_if_less);
-        stubCall.addArgument(regT0);
-        stubCall.addArgument(regT1);
-        stubCall.call();
-        emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
-    }
+    JITStubCall stubCall(this, cti_op_profile_did_call);
+    stubCall.addArgument(currentInstruction[1].u.operand, regT1);
+    stubCall.call();
 }
 
-void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
-    unsigned op2 = currentInstruction[2].u.operand;
-    unsigned target = currentInstruction[3].u.operand;
-    if (isOperandConstantImmediateInt(op2)) {
-        linkSlowCase(iter);
-        JITStubCall stubCall(this, cti_op_loop_if_lesseq);
-        stubCall.addArgument(regT0);
-        stubCall.addArgument(currentInstruction[2].u.operand, regT2);
-        stubCall.call();
-        emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
-    } else {
-        linkSlowCase(iter);
-        linkSlowCase(iter);
-        JITStubCall stubCall(this, cti_op_loop_if_lesseq);
-        stubCall.addArgument(regT0);
-        stubCall.addArgument(regT1);
-        stubCall.call();
-        emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
-    }
-}
-
-void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
-    // Normal slow cases - either is not an immediate imm, or is an array.
-    Jump notImm = getSlowCase(iter);
-    linkSlowCase(iter);
-    linkSlowCase(iter);
-    emitFastArithIntToImmNoCheck(regT1, regT1);
+// Slow cases
 
-    notImm.link(this); {
-        JITStubCall stubCall(this, cti_op_put_by_val);
-        stubCall.addArgument(regT0);
-        stubCall.addArgument(regT1);
-        stubCall.addArgument(currentInstruction[3].u.operand, regT2);
-        stubCall.call();
-        emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_put_by_val));
-    }
+void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    void* globalThis = m_codeBlock->globalObject()->globalThis();
 
-    // slow cases for immediate int accesses to arrays
     linkSlowCase(iter);
-    linkSlowCase(iter); {
-        JITStubCall stubCall(this, cti_op_put_by_val_array);
-        stubCall.addArgument(regT0);
-        stubCall.addArgument(regT1);
-        stubCall.addArgument(currentInstruction[3].u.operand, regT2);
-        stubCall.call();
-    }
+    if (shouldEmitProfiling())
+        move(TrustedImm64((JSValue::encode(jsUndefined()))), regT0);
+    Jump isNotUndefined = branch64(NotEqual, regT1, TrustedImm64(JSValue::encode(jsUndefined())));
+    emitValueProfilingSite();
+    move(TrustedImm64(JSValue::encode(JSValue(static_cast<JSCell*>(globalThis)))), regT0);
+    emitPutVirtualRegister(currentInstruction[1].u.operand, regT0);
+    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_convert_this));
+
+    linkSlowCase(iter);
+    if (shouldEmitProfiling())
+        move(TrustedImm64(JSValue::encode(m_vm->stringStructure.get())), regT0);
+    isNotUndefined.link(this);
+    emitValueProfilingSite();
+    JITStubCall stubCall(this, cti_op_convert_this);
+    stubCall.addArgument(regT1);
+    stubCall.call(currentInstruction[1].u.operand);
 }
 
-void JIT::emitSlow_op_loop_if_true(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     linkSlowCase(iter);
-    JITStubCall stubCall(this, cti_op_jtrue);
+
+    JITStubCall stubCall(this, cti_op_to_primitive);
     stubCall.addArgument(regT0);
-    stubCall.call();
-    emitJumpSlowToHot(branchTest32(NonZero, regT0), currentInstruction[2].u.operand + 2);
+    stubCall.call(currentInstruction[1].u.operand);
 }
 
 void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     linkSlowCase(iter);
-    xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0);
+    xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
     JITStubCall stubCall(this, cti_op_not);
     stubCall.addArgument(regT0);
     stubCall.call(currentInstruction[1].u.operand);
@@ -2901,15 +999,7 @@ void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
-    linkSlowCase(iter);
-    JITStubCall stubCall(this, cti_op_bitnot);
-    stubCall.addArgument(regT0);
-    stubCall.call(currentInstruction[1].u.operand);
+    emitJumpSlowToHot(branchTest32(Zero, regT0), currentInstruction[2].u.operand); // inverted!
 }
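All the emitSlow_* bodies here follow one contract: each addSlowCase() in the fast path pushes a SlowCaseEntry, the slow path pops them in exactly the same order with linkSlowCase(), and control rejoins the main line via emitJumpSlowToHot(jump(), OPCODE_LENGTH(...)). A schematic of that bookkeeping, simplified from the real JITInlines helpers:

    #include <vector>

    struct Jump {
        void linkHere() { /* patch this branch to target the current emit point */ }
    };

    struct SlowCaseEntry {
        Jump from;                 // the fast-path branch that bailed out
        unsigned bytecodeOffset;   // the bytecode it belongs to
    };

    // Bind the next pending fast-path branch to the code emitted right here.
    // Consumption order must mirror the addSlowCase() order, which is why a
    // miscounted linkSlowCase() corrupts every later slow case.
    inline void linkSlowCase(std::vector<SlowCaseEntry>::iterator& iter)
    {
        iter->from.linkHere();
        ++iter;
    }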
 
 void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -2918,7 +1008,7 @@ void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -2957,13 +1047,14 @@ void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
     stubCall.addArgument(regT0);
     stubCall.addArgument(regT1);
     stubCall.call();
-    xor32(Imm32(0x1), regT0);
+    xor32(TrustedImm32(0x1), regT0);
     emitTagAsBoolImmediate(regT0);
     emitPutVirtualRegister(currentInstruction[1].u.operand);
 }
 
 void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
+    linkSlowCase(iter);
     linkSlowCase(iter);
     linkSlowCase(iter);
     JITStubCall stubCall(this, cti_op_stricteq);
@@ -2974,6 +1065,7 @@ void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
+    linkSlowCase(iter);
     linkSlowCase(iter);
     linkSlowCase(iter);
     JITStubCall stubCall(this, cti_op_nstricteq);
@@ -2982,51 +1074,650 @@ void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_check_has_instance(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
+    unsigned dst = currentInstruction[1].u.operand;
+    unsigned value = currentInstruction[2].u.operand;
+    unsigned baseVal = currentInstruction[3].u.operand;
+
+    linkSlowCaseIfNotJSCell(iter, baseVal);
     linkSlowCase(iter);
-    linkSlowCase(iter);
-    linkSlowCase(iter);
-    linkSlowCase(iter);
+    JITStubCall stubCall(this, cti_op_check_has_instance);
+    stubCall.addArgument(value, regT2);
+    stubCall.addArgument(baseVal, regT2);
+    stubCall.call(dst);
+
+    emitJumpSlowToHot(jump(), currentInstruction[4].u.operand);
+}
+
+void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    unsigned dst = currentInstruction[1].u.operand;
+    unsigned value = currentInstruction[2].u.operand;
+    unsigned proto = currentInstruction[3].u.operand;
+
+    linkSlowCaseIfNotJSCell(iter, value);
+    linkSlowCaseIfNotJSCell(iter, proto);
     linkSlowCase(iter);
     JITStubCall stubCall(this, cti_op_instanceof);
-    stubCall.addArgument(currentInstruction[2].u.operand, regT2);
-    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
-    stubCall.addArgument(currentInstruction[4].u.operand, regT2);
-    stubCall.call(currentInstruction[1].u.operand);
+    stubCall.addArgument(value, regT2);
+    stubCall.addArgument(proto, regT2);
+    stubCall.call(dst);
 }
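The check_has_instance / instanceof split above mirrors the spec: op_check_has_instance vets the right-hand side (objects with a custom [[HasInstance]] escape to the stub, and non-objects throw there), so op_instanceof itself is nothing but a prototype-chain walk. Stated directly, with the prototype reduced to a plain field instead of the real structure-based lookup:

    struct ObjectModel {
        ObjectModel* prototype;   // simplification of the Structure-held prototype
    };

    // The walk op_instanceof performs once the constructor's 'prototype'
    // property has been fetched and both operands are known to be cells.
    bool instanceOf(ObjectModel* value, ObjectModel* prototypeProperty)
    {
        for (ObjectModel* o = value->prototype; o; o = o->prototype) {
            if (o == prototypeProperty)
                return true;
        }
        return false;
    }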
 
 void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call);
+    compileOpCallSlowCase(op_call, currentInstruction, iter, m_callLinkInfoIndex++);
 }
 
 void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call_eval);
+    compileOpCallSlowCase(op_call_eval, currentInstruction, iter, m_callLinkInfoIndex);
 }
-
+
 void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    compileOpCallVarargsSlowCase(currentInstruction, iter);
+    compileOpCallSlowCase(op_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
 }
 
 void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_construct);
+    compileOpCallSlowCase(op_construct, currentInstruction, iter, m_callLinkInfoIndex++);
 }
 
-void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_to_number(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    linkSlowCaseIfNotJSCell(iter, currentInstruction[2].u.operand);
     linkSlowCase(iter);
 
-    JITStubCall stubCall(this, cti_op_to_jsnumber);
+    JITStubCall stubCall(this, cti_op_to_number);
+    stubCall.addArgument(regT0);
+    stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_get_arguments_length(Instruction* currentInstruction)
+{
+    int dst = currentInstruction[1].u.operand;
+    int argumentsRegister = currentInstruction[2].u.operand;
+    addSlowCase(branchTest64(NonZero, addressFor(argumentsRegister)));
+    emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0);
+    sub32(TrustedImm32(1), regT0);
+    emitFastArithReTagImmediate(regT0, regT0);
+    emitPutVirtualRegister(dst, regT0);
+}
+
+void JIT::emitSlow_op_get_arguments_length(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    linkSlowCase(iter);
+    unsigned dst = currentInstruction[1].u.operand;
+    unsigned base = currentInstruction[2].u.operand;
+    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+
+    emitGetVirtualRegister(base, regT0);
+    JITStubCall stubCall(this, cti_op_get_by_id_generic);
     stubCall.addArgument(regT0);
+    stubCall.addArgument(TrustedImmPtr(ident));
+    stubCall.call(dst);
+}
+
+void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
+{
+    int dst = currentInstruction[1].u.operand;
+    int argumentsRegister = currentInstruction[2].u.operand;
+    int property = currentInstruction[3].u.operand;
+    addSlowCase(branchTest64(NonZero, addressFor(argumentsRegister)));
+    emitGetVirtualRegister(property, regT1);
+    addSlowCase(emitJumpIfNotImmediateInteger(regT1));
+    add32(TrustedImm32(1), regT1);
+    // regT1 now contains the integer index of the argument we want, including this
+    emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT2);
+    addSlowCase(branch32(AboveOrEqual, regT1, regT2));
+
+    neg32(regT1);
+    signExtend32ToPtr(regT1, regT1);
+    load64(BaseIndex(callFrameRegister, regT1, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT0);
+    emitValueProfilingSite();
+    emitPutVirtualRegister(dst, regT0);
+}
+
+void JIT::emitSlow_op_get_argument_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    unsigned dst = currentInstruction[1].u.operand;
+    unsigned arguments = currentInstruction[2].u.operand;
+    unsigned property = currentInstruction[3].u.operand;
+
+    linkSlowCase(iter);
+    Jump skipArgumentsCreation = jump();
+
+    linkSlowCase(iter);
+    linkSlowCase(iter);
+    JITStubCall(this, cti_op_create_arguments).call();
+    emitPutVirtualRegister(arguments);
+    emitPutVirtualRegister(unmodifiedArgumentsRegister(arguments));
+
+    skipArgumentsCreation.link(this);
+    JITStubCall stubCall(this, cti_op_get_by_val_generic);
+    stubCall.addArgument(arguments, regT2);
+    stubCall.addArgument(property, regT2);
+    stubCall.callWithValueProfiling(dst);
+}
+
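op_get_argument_by_val above reads a formal argument straight off the stack while no Arguments object exists: bump the index past 'this', range-check against ArgumentCount, then index downward from the 'this' slot, which is what the neg32/BaseIndex(TimesEight) pair encodes. The same arithmetic spelled out; Register and the slot layout are simplified assumptions:

    #include <cstdint>

    typedef uint64_t Register;   // one 64-bit stack slot

    // thisSlot points at the frame's 'this' argument; property is the JS index.
    // Returns false when the stub must take over (out-of-range access).
    bool getArgumentByVal(const Register* thisSlot, uint32_t property,
        uint32_t argumentCountIncludingThis, Register& result)
    {
        uint32_t index = property + 1;                   // add32(TrustedImm32(1), regT1): skip 'this'
        if (index >= argumentCountIncludingThis)         // branch32(AboveOrEqual, regT1, regT2)
            return false;
        result = thisSlot[-static_cast<int32_t>(index)]; // neg32 + BaseIndex load
        return true;
    }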
+void JIT::emit_op_put_to_base(Instruction* currentInstruction)
+{
+    int base = currentInstruction[1].u.operand;
+    int id = currentInstruction[2].u.operand;
+    int value = currentInstruction[3].u.operand;
+
+    PutToBaseOperation* operation = currentInstruction[4].u.putToBaseOperation;
+    switch (operation->m_kind) {
+    case PutToBaseOperation::GlobalVariablePutChecked:
+        addSlowCase(branchTest8(NonZero, AbsoluteAddress(operation->m_predicatePointer)));
+    case PutToBaseOperation::GlobalVariablePut: {
+        JSGlobalObject* globalObject = m_codeBlock->globalObject();
+        if (operation->m_isDynamic) {
+            emitGetVirtualRegister(base, regT0);
+            addSlowCase(branchPtr(NotEqual, regT0, TrustedImmPtr(globalObject)));
+        }
+        emitGetVirtualRegister(value, regT0);
+        store64(regT0, operation->m_registerAddress);
+        if (Heap::isWriteBarrierEnabled())
+            emitWriteBarrier(globalObject, regT0, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
+        return;
+    }
+    case PutToBaseOperation::VariablePut: {
+        emitGetVirtualRegisters(base, regT0, value, regT1);
+        loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT2);
+        store64(regT1, Address(regT2, operation->m_offset * sizeof(Register)));
+        if (Heap::isWriteBarrierEnabled())
+            emitWriteBarrier(regT0, regT1, regT2, regT3, ShouldFilterImmediates, WriteBarrierForVariableAccess);
+        return;
+    }
+
+    case PutToBaseOperation::GlobalPropertyPut: {
+        emitGetVirtualRegisters(base, regT0, value, regT1);
+        loadPtr(&operation->m_structure, regT2);
+        addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), regT2));
+        ASSERT(!operation->m_structure || !operation->m_structure->inlineCapacity());
+        loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
+        load32(&operation->m_offsetInButterfly, regT3);
+        signExtend32ToPtr(regT3, regT3);
+        store64(regT1, BaseIndex(regT2, regT3, TimesEight));
+        if (Heap::isWriteBarrierEnabled())
+            emitWriteBarrier(regT0, regT1, regT2, regT3, ShouldFilterImmediates, WriteBarrierForVariableAccess);
+        return;
+    }
+
+    case PutToBaseOperation::Uninitialised:
+    case PutToBaseOperation::Readonly:
+    case PutToBaseOperation::Generic:
+        JITStubCall stubCall(this, cti_op_put_to_base);
+
+        stubCall.addArgument(TrustedImm32(base));
+        stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(id)));
+        stubCall.addArgument(TrustedImm32(value));
+        stubCall.addArgument(TrustedImmPtr(operation));
+        stubCall.call();
+        return;
+    }
+}
+
+#endif // USE(JSVALUE64)
+
+void JIT::emit_op_loop_hint(Instruction*)
+{
+    // Emit the JIT optimization check:
+    if (canBeOptimized())
+        addSlowCase(branchAdd32(PositiveOrZero, TrustedImm32(Options::executionCounterIncrementForLoop()),
+            AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
+
+    // Emit the watchdog timer check:
+    if (m_vm->watchdog.isEnabled())
+        addSlowCase(branchTest8(NonZero, AbsoluteAddress(m_vm->watchdog.timerDidFireAddress())));
+}
+
+void JIT::emitSlow_op_loop_hint(Instruction*, Vector<SlowCaseEntry>::iterator& iter)
+{
+#if ENABLE(DFG_JIT)
+    // Emit the slow path for the JIT optimization check:
+    if (canBeOptimized()) {
+        linkSlowCase(iter);
+
+        JITStubCall stubCall(this, cti_optimize);
+        stubCall.addArgument(TrustedImm32(m_bytecodeOffset));
+        stubCall.call();
+
+        emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_loop_hint));
+    }
+#endif
+
+    // Emit the slow path of the watchdog timer check:
+    if (m_vm->watchdog.isEnabled()) {
+        linkSlowCase(iter);
+
+        JITStubCall stubCall(this, cti_handle_watchdog_timer);
+        stubCall.call();
+
+        emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_loop_hint));
+    }
+
+}
+
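op_loop_hint above is where tiering starts. Every loop back edge adds executionCounterIncrementForLoop() to the CodeBlock's execute counter, and the add itself doubles as the threshold test: counters start out negative, and branchAdd32(PositiveOrZero, ...) diverts to cti_optimize the moment the sum reaches zero. A scalar model of that counter:

    #include <cstdint>

    // The real counter lives on the CodeBlock; the increment comes from
    // Options::executionCounterIncrementForLoop().
    struct ExecuteCounterModel {
        int32_t counter;   // initialized to -threshold at compile time

        // Returns true when this back edge should attempt OSR into the DFG.
        bool loopHint(int32_t incrementForLoop)
        {
            counter += incrementForLoop;
            return counter >= 0;   // the PositiveOrZero branch is taken
        }
    };

The watchdog check beside it is the same pattern with a byte flag instead of a counter: a timer fires and sets timerDidFireAddress(), and the next back edge routes through cti_handle_watchdog_timer.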
+void JIT::emit_resolve_operations(ResolveOperations* resolveOperations, const int* baseVR, const int* valueVR)
+{
+
+#if USE(JSVALUE32_64)
+    unmap();
+#else
+    killLastResultRegister();
+#endif
+
+    if (resolveOperations->isEmpty()) {
+        addSlowCase(jump());
+        return;
+    }
+
+    const RegisterID value = regT0;
+#if USE(JSVALUE32_64)
+    const RegisterID valueTag = regT1;
+#endif
+    const RegisterID scope = regT2;
+    const RegisterID scratch = regT3;
+
+    JSGlobalObject* globalObject = m_codeBlock->globalObject();
+    ResolveOperation* pc = resolveOperations->data();
+    emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, scope);
+    bool setBase = false;
+    bool resolvingBase = true;
+    while (resolvingBase) {
+        switch (pc->m_operation) {
+        case ResolveOperation::ReturnGlobalObjectAsBase:
+            move(TrustedImmPtr(globalObject), value);
+#if USE(JSVALUE32_64)
+            move(TrustedImm32(JSValue::CellTag), valueTag);
+#endif
+            emitValueProfilingSite();
+            emitStoreCell(*baseVR, value);
+            return;
+        case ResolveOperation::SetBaseToGlobal:
+            RELEASE_ASSERT(baseVR);
+            setBase = true;
+            move(TrustedImmPtr(globalObject), scratch);
+            emitStoreCell(*baseVR, scratch);
+            resolvingBase = false;
+            ++pc;
+            break;
+        case ResolveOperation::SetBaseToUndefined: {
+            RELEASE_ASSERT(baseVR);
+            setBase = true;
+#if USE(JSVALUE64)
+            move(TrustedImm64(JSValue::encode(jsUndefined())), scratch);
+            emitPutVirtualRegister(*baseVR, scratch);
+#else
+            emitStore(*baseVR, jsUndefined());
+#endif
+            resolvingBase = false;
+            ++pc;
+            break;
+        }
+        case ResolveOperation::SetBaseToScope:
+            RELEASE_ASSERT(baseVR);
+            setBase = true;
+            emitStoreCell(*baseVR, scope);
+            resolvingBase = false;
+            ++pc;
+            break;
+        case ResolveOperation::ReturnScopeAsBase:
+            emitStoreCell(*baseVR, scope);
+            RELEASE_ASSERT(value == regT0);
+            move(scope, value);
+#if USE(JSVALUE32_64)
+            move(TrustedImm32(JSValue::CellTag), valueTag);
+#endif
+            emitValueProfilingSite();
+            return;
+        case ResolveOperation::SkipTopScopeNode: {
+#if USE(JSVALUE32_64)
+            Jump activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), TrustedImm32(JSValue::EmptyValueTag));
+#else
+            Jump activationNotCreated = branchTest64(Zero, addressFor(m_codeBlock->activationRegister()));
+#endif
+            loadPtr(Address(scope, JSScope::offsetOfNext()), scope);
+            activationNotCreated.link(this);
+            ++pc;
+            break;
+        }
+        case ResolveOperation::CheckForDynamicEntriesBeforeGlobalScope: {
+            move(scope, regT3);
+            loadPtr(Address(regT3, JSScope::offsetOfNext()), regT1);
+            Jump atTopOfScope = branchTestPtr(Zero, regT1);
+            Label loopStart = label();
+            loadPtr(Address(regT3, JSCell::structureOffset()), regT2);
+            Jump isActivation = branchPtr(Equal, regT2, TrustedImmPtr(globalObject->activationStructure()));
+            addSlowCase(branchPtr(NotEqual, regT2, TrustedImmPtr(globalObject->nameScopeStructure())));
+            isActivation.link(this);
+            move(regT1, regT3);
+            loadPtr(Address(regT3, JSScope::offsetOfNext()), regT1);
+            branchTestPtr(NonZero, regT1, loopStart);
+            atTopOfScope.link(this);
+            ++pc;
+            break;
+        }
+        case ResolveOperation::SkipScopes: {
+            for (int i = 0; i < pc->m_scopesToSkip; i++)
+                loadPtr(Address(scope, JSScope::offsetOfNext()), scope);
+            ++pc;
+            break;
+        }
+        case ResolveOperation::Fail:
+            addSlowCase(jump());
+            return;
+        default:
+            resolvingBase = false;
+        }
+    }
+    if (baseVR && !setBase)
+        emitStoreCell(*baseVR, scope);
+
+    RELEASE_ASSERT(valueVR);
+    ResolveOperation* resolveValueOperation = pc;
+    switch (resolveValueOperation->m_operation) {
+    case ResolveOperation::GetAndReturnGlobalProperty: {
+        // Verify structure.
+        move(TrustedImmPtr(globalObject), regT2);
+        move(TrustedImmPtr(resolveValueOperation), regT3);
+        loadPtr(Address(regT3, OBJECT_OFFSETOF(ResolveOperation, m_structure)), regT1);
+        addSlowCase(branchPtr(NotEqual, regT1, Address(regT2, JSCell::structureOffset())));
+
+        // Load property.
+        load32(Address(regT3, OBJECT_OFFSETOF(ResolveOperation, m_offset)), regT3);
+
+        // regT2: GlobalObject
+        // regT3: offset
+#if USE(JSVALUE32_64)
+        compileGetDirectOffset(regT2, valueTag, value, regT3, KnownNotFinal);
+#else
+        compileGetDirectOffset(regT2, value, regT3, regT1, KnownNotFinal);
+#endif
+        break;
+    }
+    case ResolveOperation::GetAndReturnGlobalVarWatchable:
+    case ResolveOperation::GetAndReturnGlobalVar: {
+#if USE(JSVALUE32_64)
+        load32(reinterpret_cast<char*>(pc->m_registerAddress) + OBJECT_OFFSETOF(JSValue, u.asBits.tag), valueTag);
+        load32(reinterpret_cast<char*>(pc->m_registerAddress) + OBJECT_OFFSETOF(JSValue, u.asBits.payload), value);
+#else
+        load64(reinterpret_cast<char*>(pc->m_registerAddress), value);
+#endif
+        break;
+    }
+    case ResolveOperation::GetAndReturnScopedVar: {
+        loadPtr(Address(scope, JSVariableObject::offsetOfRegisters()), scope);
+#if USE(JSVALUE32_64)
+        load32(Address(scope, pc->m_offset * sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), valueTag);
+        load32(Address(scope, pc->m_offset * sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value);
+#else
+        load64(Address(scope, pc->m_offset * sizeof(Register)), value);
+#endif
+        break;
+    }
+    default:
+        CRASH();
+        return;
+    }
+
+#if USE(JSVALUE32_64)
+    emitStore(*valueVR, valueTag, value);
+#else
+    emitPutVirtualRegister(*valueVR, value);
+#endif
+    emitValueProfilingSite();
+}
+
+void JIT::emitSlow_link_resolve_operations(ResolveOperations* resolveOperations, Vector<SlowCaseEntry>::iterator& iter)
+{
+    if (resolveOperations->isEmpty()) {
+        linkSlowCase(iter);
+        return;
+    }
+
+    ResolveOperation* pc = resolveOperations->data();
+    bool resolvingBase = true;
+    while (resolvingBase) {
+        switch (pc->m_operation) {
+        case ResolveOperation::ReturnGlobalObjectAsBase:
+            return;
+        case ResolveOperation::SetBaseToGlobal:
+            resolvingBase = false;
+            ++pc;
+            break;
+        case ResolveOperation::SetBaseToUndefined: {
+            resolvingBase = false;
+            ++pc;
+            break;
+        }
+        case ResolveOperation::SetBaseToScope:
+            resolvingBase = false;
+            ++pc;
+            break;
+        case ResolveOperation::ReturnScopeAsBase:
+            return;
+        case ResolveOperation::SkipTopScopeNode: {
+            ++pc;
+            break;
+        }
+        case ResolveOperation::SkipScopes:
+            ++pc;
+            break;
+        case ResolveOperation::Fail:
+            linkSlowCase(iter);
+            return;
+        case ResolveOperation::CheckForDynamicEntriesBeforeGlobalScope: {
+            linkSlowCase(iter);
+            ++pc;
+            break;
+        }
+        default:
+            resolvingBase = false;
+        }
+    }
+    ResolveOperation* resolveValueOperation = pc;
+    switch (resolveValueOperation->m_operation) {
+    case ResolveOperation::GetAndReturnGlobalProperty: {
+        linkSlowCase(iter);
+        break;
+    }
+    case ResolveOperation::GetAndReturnGlobalVarWatchable:
+    case ResolveOperation::GetAndReturnGlobalVar:
+        break;
+    case ResolveOperation::GetAndReturnScopedVar:
+        break;
+    default:
+        CRASH();
+        return;
+    }
+}
+
+void JIT::emit_op_resolve(Instruction* currentInstruction)
+{
+    ResolveOperations* operations = currentInstruction[3].u.resolveOperations;
+    int dst = currentInstruction[1].u.operand;
+    emit_resolve_operations(operations, 0, &dst);
+}
+
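emit_resolve_operations above is a mini-compiler: the bytecode generator pre-plans how a name resolves (skip N scopes, then fetch the value one known way), and the JIT unrolls that plan into straight-line loads, bailing to the stub at any Fail step or structure mismatch. The same plan executed as an interpreter, over an abbreviated op set with illustrative accessors:

    #include <cstdint>

    struct ScopeModel {
        ScopeModel* next;        // JSScope::offsetOfNext() in the emitted code
        uint64_t* registers;     // JSVariableObject::offsetOfRegisters()
    };

    enum class ResolveOp { SkipScopes, GetAndReturnScopedVar, Fail };
    struct ResolveStep { ResolveOp op; int scopesToSkip; int offset; };

    // Returns true on success; 'false' corresponds to addSlowCase(jump()).
    bool runResolvePlan(ScopeModel* scope, const ResolveStep* pc, uint64_t& result)
    {
        for (;;) {
            switch (pc->op) {
            case ResolveOp::SkipScopes:        // the JIT emits one loadPtr per hop
                for (int i = 0; i < pc->scopesToSkip; ++i)
                    scope = scope->next;
                ++pc;
                continue;
            case ResolveOp::GetAndReturnScopedVar:
                result = scope->registers[pc->offset];
                return true;
            case ResolveOp::Fail:
                return false;
            }
        }
    }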
+void JIT::emitSlow_op_resolve(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    ResolveOperations* operations = currentInstruction[3].u.resolveOperations;
+    emitSlow_link_resolve_operations(operations, iter);
+    JITStubCall stubCall(this, cti_op_resolve);
+    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+    stubCall.addArgument(TrustedImmPtr(currentInstruction[3].u.resolveOperations));
+    stubCall.callWithValueProfiling(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_base(Instruction* currentInstruction)
+{
+    ResolveOperations* operations = currentInstruction[4].u.resolveOperations;
+    int dst = currentInstruction[1].u.operand;
+    emit_resolve_operations(operations, &dst, 0);
+}
+
+void JIT::emitSlow_op_resolve_base(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    ResolveOperations* operations = currentInstruction[4].u.resolveOperations;
+    emitSlow_link_resolve_operations(operations, iter);
+    JITStubCall stubCall(this, currentInstruction[3].u.operand ? cti_op_resolve_base_strict_put : cti_op_resolve_base);
+    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+    stubCall.addArgument(TrustedImmPtr(currentInstruction[4].u.resolveOperations));
+    stubCall.addArgument(TrustedImmPtr(currentInstruction[5].u.putToBaseOperation));
+    stubCall.callWithValueProfiling(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
+{
+    ResolveOperations* operations = currentInstruction[4].u.resolveOperations;
+    int base = currentInstruction[1].u.operand;
+    int value = currentInstruction[2].u.operand;
+    emit_resolve_operations(operations, &base, &value);
+}
+
+void JIT::emitSlow_op_resolve_with_base(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    ResolveOperations* operations = currentInstruction[4].u.resolveOperations;
+    emitSlow_link_resolve_operations(operations, iter);
+    JITStubCall stubCall(this, cti_op_resolve_with_base);
+    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+    stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
+    stubCall.addArgument(TrustedImmPtr(currentInstruction[4].u.resolveOperations));
+    stubCall.addArgument(TrustedImmPtr(currentInstruction[5].u.putToBaseOperation));
+    stubCall.callWithValueProfiling(currentInstruction[2].u.operand);
+}
+
+void JIT::emit_op_resolve_with_this(Instruction* currentInstruction)
+{
+    ResolveOperations* operations = currentInstruction[4].u.resolveOperations;
+    int base = currentInstruction[1].u.operand;
+    int value = currentInstruction[2].u.operand;
+    emit_resolve_operations(operations, &base, &value);
+}
+
+void JIT::emitSlow_op_resolve_with_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    ResolveOperations* operations = currentInstruction[4].u.resolveOperations;
+    emitSlow_link_resolve_operations(operations, iter);
+    JITStubCall stubCall(this, cti_op_resolve_with_this);
+    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+    stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
+    stubCall.addArgument(TrustedImmPtr(currentInstruction[4].u.resolveOperations));
+    stubCall.callWithValueProfiling(currentInstruction[2].u.operand);
+}
+
+void JIT::emitSlow_op_put_to_base(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    int base = currentInstruction[1].u.operand;
+    int id = currentInstruction[2].u.operand;
+    int value = currentInstruction[3].u.operand;
+
+    PutToBaseOperation* putToBaseOperation = currentInstruction[4].u.putToBaseOperation;
+    switch (putToBaseOperation->m_kind) {
+    case PutToBaseOperation::VariablePut:
+        return;
+
+    case PutToBaseOperation::GlobalVariablePutChecked:
+        linkSlowCase(iter);
+    case PutToBaseOperation::GlobalVariablePut:
+        if (!putToBaseOperation->m_isDynamic)
+            return;
+        linkSlowCase(iter);
+        break;
+
+    case PutToBaseOperation::Uninitialised:
+    case PutToBaseOperation::Readonly:
+    case PutToBaseOperation::Generic:
+        return;
+
+    case PutToBaseOperation::GlobalPropertyPut:
+        linkSlowCase(iter);
+        break;
+
+    }
+
+    JITStubCall stubCall(this, cti_op_put_to_base);
+
+    stubCall.addArgument(TrustedImm32(base));
+    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(id)));
+    stubCall.addArgument(TrustedImm32(value));
+    stubCall.addArgument(TrustedImmPtr(putToBaseOperation));
+    stubCall.call();
+}
+
+void JIT::emit_op_new_regexp(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, cti_op_new_regexp);
+    stubCall.addArgument(TrustedImmPtr(m_codeBlock->regexp(currentInstruction[2].u.operand)));
+    stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_new_func(Instruction* currentInstruction)
+{
+    Jump lazyJump;
+    int dst = currentInstruction[1].u.operand;
+    if (currentInstruction[3].u.operand) {
+#if USE(JSVALUE32_64)
+        lazyJump = branch32(NotEqual, tagFor(dst), TrustedImm32(JSValue::EmptyValueTag));
+#else
+        lazyJump = branchTest64(NonZero, addressFor(dst));
+#endif
+    }
+
+    JITStubCall stubCall(this, cti_op_new_func);
+    stubCall.addArgument(TrustedImmPtr(m_codeBlock->functionDecl(currentInstruction[2].u.operand)));
+    stubCall.call(dst);
+
+    if (currentInstruction[3].u.operand) {
+#if USE(JSVALUE32_64)
+        unmap();
+#else
+        killLastResultRegister();
+#endif
+        lazyJump.link(this);
+    }
+}
+
+void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, cti_op_new_func_exp);
+    stubCall.addArgument(TrustedImmPtr(m_codeBlock->functionExpr(currentInstruction[2].u.operand)));
+    stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_new_array(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, cti_op_new_array);
+    stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
+    stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
+    stubCall.addArgument(TrustedImmPtr(currentInstruction[4].u.arrayAllocationProfile));
+    stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_new_array_with_size(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, cti_op_new_array_with_size);
+#if USE(JSVALUE64)
+    stubCall.addArgument(currentInstruction[2].u.operand, regT2);
+#else
+    stubCall.addArgument(currentInstruction[2].u.operand);
+#endif
+    stubCall.addArgument(TrustedImmPtr(currentInstruction[3].u.arrayAllocationProfile));
     stubCall.call(currentInstruction[1].u.operand);
 }
 
-#endif // USE(JSVALUE32_64)
+void JIT::emit_op_new_array_buffer(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, cti_op_new_array_buffer);
+    stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
+    stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
+    stubCall.addArgument(TrustedImmPtr(currentInstruction[4].u.arrayAllocationProfile));
+    stubCall.call(currentInstruction[1].u.operand);
+}
 
 } // namespace JSC
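One closing note on op_new_func above: when operand 3 is set, the register may already hold the function object by the time the instruction executes, so the emitted code guards the stub call behind an emptiness test, the same lazy idiom op_create_activation uses. In outline, with newFunctionStub() standing in for cti_op_new_func:

    #include <cstdint>

    typedef uint64_t EncodedJSValue;   // 0 is the empty marker, as in JSValue()

    EncodedJSValue newFunctionStub();  // stands in for cti_op_new_func

    inline void newFuncChecked(EncodedJSValue& dst)
    {
        if (!dst)                      // lazyJump = branchTest64(NonZero, addressFor(dst))
            dst = newFunctionStub();   // otherwise the stub call is skipped entirely
    }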