X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/blobdiff_plain/93a3786624b2768d89bfa27e46598dc64e2fb70a..refs/heads/master:/jit/JITOpcodes32_64.cpp diff --git a/jit/JITOpcodes32_64.cpp b/jit/JITOpcodes32_64.cpp index 4836a66..8764cc2 100644 --- a/jit/JITOpcodes32_64.cpp +++ b/jit/JITOpcodes32_64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2009, 2012 Apple Inc. All rights reserved. + * Copyright (C) 2009, 2012, 2013, 2014 Apple Inc. All rights reserved. * Copyright (C) 2010 Patrick Gansterer * * Redistribution and use in source and binary forms, with or without @@ -30,14 +30,22 @@ #if USE(JSVALUE32_64) #include "JIT.h" +#include "CCallHelpers.h" +#include "Debugger.h" +#include "Exception.h" #include "JITInlines.h" -#include "JITStubCall.h" #include "JSArray.h" #include "JSCell.h" +#include "JSEnvironmentRecord.h" #include "JSFunction.h" -#include "JSPropertyNameIterator.h" -#include "JSVariableObject.h" +#include "JSNameScope.h" +#include "JSPropertyNameEnumerator.h" #include "LinkBuffer.h" +#include "MaxFrameExtentForSlowPathCall.h" +#include "RepatchBuffer.h" +#include "SlowPathCall.h" +#include "TypeProfilerLog.h" +#include "VirtualRegister.h" namespace JSC { @@ -45,139 +53,80 @@ JIT::CodeRef JIT::privateCompileCTINativeCall(VM* vm, NativeFunction func) { Call nativeCall; + emitFunctionPrologue(); emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock); storePtr(callFrameRegister, &m_vm->topCallFrame); #if CPU(X86) - // Load caller frame's scope chain into this callframe so that whatever we call can - // get to its global data. - emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT0); - emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1, regT0); - emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain); - - peek(regT1); - emitPutToCallFrameHeader(regT1, JSStack::ReturnPC); - // Calling convention: f(ecx, edx, ...); // Host function signature: f(ExecState*); move(callFrameRegister, X86Registers::ecx); - subPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call. - - move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack. - - // call the function - nativeCall = call(); - - addPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister); - -#elif CPU(ARM) - // Load caller frame's scope chain into this callframe so that whatever we call can - // get to its global data. - emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT2); - emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1, regT2); - emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain); - - preserveReturnAddressAfterCall(regT3); // Callee preserved - emitPutToCallFrameHeader(regT3, JSStack::ReturnPC); - - // Calling convention: f(r0 == regT0, r1 == regT1, ...); - // Host function signature: f(ExecState*); - move(callFrameRegister, ARMRegisters::r0); - - emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARMRegisters::r1); - move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack. - loadPtr(Address(ARMRegisters::r1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2); + subPtr(TrustedImm32(8), stackPointerRegister); // Align stack for call. + storePtr(X86Registers::ecx, Address(stackPointerRegister)); // call the function nativeCall = call(); - restoreReturnAddressBeforeReturn(regT3); - -#elif CPU(MIPS) - // Load caller frame's scope chain into this callframe so that whatever we call can - // get to its global data. 
- emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT0); - emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1, regT0); - emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain); - - preserveReturnAddressAfterCall(regT3); // Callee preserved - emitPutToCallFrameHeader(regT3, JSStack::ReturnPC); - - // Calling convention: f(a0, a1, a2, a3); - // Host function signature: f(ExecState*); + addPtr(TrustedImm32(8), stackPointerRegister); - // Allocate stack space for 16 bytes (8-byte aligned) - // 16 bytes (unused) for 4 arguments +#elif CPU(ARM) || CPU(SH4) || CPU(MIPS) +#if CPU(MIPS) + // Allocate stack space for (unused) 16 bytes (8-byte aligned) for 4 arguments. subPtr(TrustedImm32(16), stackPointerRegister); +#endif - // Setup arg0 - move(callFrameRegister, MIPSRegisters::a0); + // Calling convention is f(argumentGPR0, argumentGPR1, ...). + // Host function signature is f(ExecState*). + move(callFrameRegister, argumentGPR0); + + emitGetFromCallFrameHeaderPtr(JSStack::Callee, argumentGPR1); + loadPtr(Address(argumentGPR1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2); - // Call - emitGetFromCallFrameHeaderPtr(JSStack::Callee, MIPSRegisters::a2); - loadPtr(Address(MIPSRegisters::a2, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2); - move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack. - // call the function nativeCall = call(); +#if CPU(MIPS) // Restore stack space addPtr(TrustedImm32(16), stackPointerRegister); - - restoreReturnAddressBeforeReturn(regT3); -#elif CPU(SH4) - // Load caller frame's scope chain into this callframe so that whatever we call can - // get to its global data. - emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT2); - emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1, regT2); - emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain); - - preserveReturnAddressAfterCall(regT3); // Callee preserved - emitPutToCallFrameHeader(regT3, JSStack::ReturnPC); - - // Calling convention: f(r0 == regT4, r1 == regT5, ...); - // Host function signature: f(ExecState*); - move(callFrameRegister, regT4); - - emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT5); - move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack. - loadPtr(Address(regT5, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2); - - // call the function - nativeCall = call(); +#endif restoreReturnAddressBeforeReturn(regT3); #else #error "JIT not supported on this platform." - breakpoint(); + abortWithReason(JITNotSupported); #endif // CPU(X86) // Check for an exception - Jump sawException = branch32(NotEqual, AbsoluteAddress(reinterpret_cast(&vm->exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag)); + Jump sawException = branch32(NotEqual, AbsoluteAddress(vm->addressOfException()), TrustedImm32(0)); + emitFunctionEpilogue(); // Return. ret(); // Handle an exception sawException.link(this); - // Grab the return address. - preserveReturnAddressAfterCall(regT1); - - move(TrustedImmPtr(&vm->exceptionLocation), regT2); - storePtr(regT1, regT2); - poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*)); - storePtr(callFrameRegister, &m_vm->topCallFrame); - // Set the return address. 
- move(TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT1); - restoreReturnAddressBeforeReturn(regT1); - ret(); +#if CPU(X86) + addPtr(TrustedImm32(-4), stackPointerRegister); + loadPtr(Address(callFrameRegister), X86Registers::ecx); + push(X86Registers::ecx); +#else + loadPtr(Address(callFrameRegister), argumentGPR0); +#endif + move(TrustedImmPtr(FunctionPtr(operationVMHandleException).value()), regT3); + call(regT3); + +#if CPU(X86) + addPtr(TrustedImm32(8), stackPointerRegister); +#endif + + jumpToExceptionHandler(); // All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object. - LinkBuffer patchBuffer(*m_vm, this, GLOBAL_THUNK_ID); + LinkBuffer patchBuffer(*m_vm, *this, GLOBAL_THUNK_ID); patchBuffer.link(nativeCall, FunctionPtr(func)); return FINALIZE_CODE(patchBuffer, ("JIT CTI native call")); @@ -185,23 +134,22 @@ JIT::CodeRef JIT::privateCompileCTINativeCall(VM* vm, NativeFunction func) void JIT::emit_op_mov(Instruction* currentInstruction) { - unsigned dst = currentInstruction[1].u.operand; - unsigned src = currentInstruction[2].u.operand; - + int dst = currentInstruction[1].u.operand; + int src = currentInstruction[2].u.operand; + if (m_codeBlock->isConstantRegisterIndex(src)) emitStore(dst, getConstantOperand(src)); else { emitLoad(src, regT1, regT0); emitStore(dst, regT1, regT0); - map(m_bytecodeOffset + OPCODE_LENGTH(op_mov), dst, regT1, regT0); } } void JIT::emit_op_end(Instruction* currentInstruction) { - ASSERT(returnValueRegister != callFrameRegister); + ASSERT(returnValueGPR != callFrameRegister); emitLoad(currentInstruction[1].u.operand, regT1, regT0); - restoreReturnAddressBeforeReturn(Address(callFrameRegister, JSStack::ReturnPC * static_cast(sizeof(Register)))); + emitFunctionEpilogue(); ret(); } @@ -214,7 +162,7 @@ void JIT::emit_op_jmp(Instruction* currentInstruction) void JIT::emit_op_new_object(Instruction* currentInstruction) { Structure* structure = currentInstruction[3].u.objectAllocationProfile->structure(); - size_t allocationSize = JSObject::allocationSize(structure->inlineCapacity()); + size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity()); MarkedAllocator* allocator = &m_vm->heap.allocatorForObjectWithoutDestructor(allocationSize); RegisterID resultReg = regT0; @@ -229,14 +177,15 @@ void JIT::emit_op_new_object(Instruction* currentInstruction) void JIT::emitSlow_op_new_object(Instruction* currentInstruction, Vector::iterator& iter) { linkSlowCase(iter); - JITStubCall stubCall(this, cti_op_new_object); - stubCall.addArgument(TrustedImmPtr(currentInstruction[3].u.objectAllocationProfile->structure())); - stubCall.call(currentInstruction[1].u.operand); + int dst = currentInstruction[1].u.operand; + Structure* structure = currentInstruction[3].u.objectAllocationProfile->structure(); + callOperation(operationNewObject, structure); + emitStoreCell(dst, returnValueGPR); } void JIT::emit_op_check_has_instance(Instruction* currentInstruction) { - unsigned baseVal = currentInstruction[3].u.operand; + int baseVal = currentInstruction[3].u.operand; emitLoadPayload(baseVal, regT0); @@ -244,15 +193,14 @@ void JIT::emit_op_check_has_instance(Instruction* currentInstruction) emitJumpSlowCaseIfNotJSCell(baseVal); // Check that baseVal 'ImplementsHasInstance'. 
- loadPtr(Address(regT0, JSCell::structureOffset()), regT0); - addSlowCase(branchTest8(Zero, Address(regT0, Structure::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance))); + addSlowCase(branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance))); } void JIT::emit_op_instanceof(Instruction* currentInstruction) { - unsigned dst = currentInstruction[1].u.operand; - unsigned value = currentInstruction[2].u.operand; - unsigned proto = currentInstruction[3].u.operand; + int dst = currentInstruction[1].u.operand; + int value = currentInstruction[2].u.operand; + int proto = currentInstruction[3].u.operand; // Load the operands into registers. // We use regT0 for baseVal since we will be done with this first, and we can then use it for the result. @@ -264,8 +212,7 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction) emitJumpSlowCaseIfNotJSCell(proto); // Check that prototype is an object - loadPtr(Address(regT1, JSCell::structureOffset()), regT3); - addSlowCase(emitJumpIfNotObject(regT3)); + addSlowCase(emitJumpIfCellNotObject(regT1)); // Optimistically load the result true, and start looping. // Initially, regT1 still contains proto and regT2 still contains value. @@ -275,7 +222,7 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction) // Load the prototype of the cell in regT2. If this is equal to regT1 - WIN! // Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again. - loadPtr(Address(regT2, JSCell::structureOffset()), regT2); + loadPtr(Address(regT2, JSCell::structureIDOffset()), regT2); load32(Address(regT2, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2); Jump isInstance = branchPtr(Equal, regT2, regT1); branchTest32(NonZero, regT2).linkTo(loop, this); @@ -290,41 +237,39 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction) void JIT::emitSlow_op_check_has_instance(Instruction* currentInstruction, Vector::iterator& iter) { - unsigned dst = currentInstruction[1].u.operand; - unsigned value = currentInstruction[2].u.operand; - unsigned baseVal = currentInstruction[3].u.operand; + int dst = currentInstruction[1].u.operand; + int value = currentInstruction[2].u.operand; + int baseVal = currentInstruction[3].u.operand; linkSlowCaseIfNotJSCell(iter, baseVal); linkSlowCase(iter); - JITStubCall stubCall(this, cti_op_check_has_instance); - stubCall.addArgument(value); - stubCall.addArgument(baseVal); - stubCall.call(dst); + emitLoad(value, regT1, regT0); + emitLoad(baseVal, regT3, regT2); + callOperation(operationCheckHasInstance, dst, regT1, regT0, regT3, regT2); emitJumpSlowToHot(jump(), currentInstruction[4].u.operand); } void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector::iterator& iter) { - unsigned dst = currentInstruction[1].u.operand; - unsigned value = currentInstruction[2].u.operand; - unsigned proto = currentInstruction[3].u.operand; + int dst = currentInstruction[1].u.operand; + int value = currentInstruction[2].u.operand; + int proto = currentInstruction[3].u.operand; linkSlowCaseIfNotJSCell(iter, value); linkSlowCaseIfNotJSCell(iter, proto); linkSlowCase(iter); - JITStubCall stubCall(this, cti_op_instanceof); - stubCall.addArgument(value); - stubCall.addArgument(proto); - stubCall.call(dst); + emitLoad(value, regT1, regT0); + emitLoad(proto, regT3, regT2); + callOperation(operationInstanceOf, dst, regT1, regT0, regT3, regT2); } void JIT::emit_op_is_undefined(Instruction* currentInstruction) { - 
unsigned dst = currentInstruction[1].u.operand; - unsigned value = currentInstruction[2].u.operand; + int dst = currentInstruction[1].u.operand; + int value = currentInstruction[2].u.operand; emitLoad(value, regT1, regT0); Jump isCell = branch32(Equal, regT1, TrustedImm32(JSValue::CellTag)); @@ -333,12 +278,12 @@ void JIT::emit_op_is_undefined(Instruction* currentInstruction) Jump done = jump(); isCell.link(this); - loadPtr(Address(regT0, JSCell::structureOffset()), regT1); - Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT1, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); + Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); move(TrustedImm32(0), regT0); Jump notMasqueradesAsUndefined = jump(); isMasqueradesAsUndefined.link(this); + loadPtr(Address(regT0, JSCell::structureIDOffset()), regT1); move(TrustedImmPtr(m_codeBlock->globalObject()), regT0); loadPtr(Address(regT1, Structure::globalObjectOffset()), regT1); compare32(Equal, regT0, regT1, regT0); @@ -350,8 +295,8 @@ void JIT::emit_op_is_undefined(Instruction* currentInstruction) void JIT::emit_op_is_boolean(Instruction* currentInstruction) { - unsigned dst = currentInstruction[1].u.operand; - unsigned value = currentInstruction[2].u.operand; + int dst = currentInstruction[1].u.operand; + int value = currentInstruction[2].u.operand; emitLoadTag(value, regT0); compare32(Equal, regT0, TrustedImm32(JSValue::BooleanTag), regT0); @@ -360,8 +305,8 @@ void JIT::emit_op_is_boolean(Instruction* currentInstruction) void JIT::emit_op_is_number(Instruction* currentInstruction) { - unsigned dst = currentInstruction[1].u.operand; - unsigned value = currentInstruction[2].u.operand; + int dst = currentInstruction[1].u.operand; + int value = currentInstruction[2].u.operand; emitLoadTag(value, regT0); add32(TrustedImm32(1), regT0); @@ -371,14 +316,13 @@ void JIT::emit_op_is_number(Instruction* currentInstruction) void JIT::emit_op_is_string(Instruction* currentInstruction) { - unsigned dst = currentInstruction[1].u.operand; - unsigned value = currentInstruction[2].u.operand; + int dst = currentInstruction[1].u.operand; + int value = currentInstruction[2].u.operand; emitLoad(value, regT1, regT0); Jump isNotCell = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)); - loadPtr(Address(regT0, JSCell::structureOffset()), regT1); - compare8(Equal, Address(regT1, Structure::typeInfoTypeOffset()), TrustedImm32(StringType), regT0); + compare8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType), regT0); Jump done = jump(); isNotCell.link(this); @@ -388,27 +332,22 @@ void JIT::emit_op_is_string(Instruction* currentInstruction) emitStoreBool(dst, regT0); } -void JIT::emit_op_tear_off_activation(Instruction* currentInstruction) +void JIT::emit_op_is_object(Instruction* currentInstruction) { - unsigned activation = currentInstruction[1].u.operand; - Jump activationNotCreated = branch32(Equal, tagFor(activation), TrustedImm32(JSValue::EmptyValueTag)); - JITStubCall stubCall(this, cti_op_tear_off_activation); - stubCall.addArgument(activation); - stubCall.call(); - activationNotCreated.link(this); -} + int dst = currentInstruction[1].u.operand; + int value = currentInstruction[2].u.operand; -void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction) -{ - int arguments = currentInstruction[1].u.operand; - int activation = currentInstruction[2].u.operand; + emitLoad(value, regT1, regT0); + Jump 
isNotCell = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)); + + compare8(AboveOrEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType), regT0); + Jump done = jump(); + + isNotCell.link(this); + move(TrustedImm32(0), regT0); - Jump argsNotCreated = branch32(Equal, tagFor(unmodifiedArgumentsRegister(arguments)), TrustedImm32(JSValue::EmptyValueTag)); - JITStubCall stubCall(this, cti_op_tear_off_arguments); - stubCall.addArgument(unmodifiedArgumentsRegister(arguments)); - stubCall.addArgument(activation); - stubCall.call(); - argsNotCreated.link(this); + done.link(this); + emitStoreBool(dst, regT0); } void JIT::emit_op_to_primitive(Instruction* currentInstruction) @@ -419,37 +358,31 @@ void JIT::emit_op_to_primitive(Instruction* currentInstruction) emitLoad(src, regT1, regT0); Jump isImm = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)); - addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get()))); + addSlowCase(emitJumpIfCellObject(regT0)); isImm.link(this); if (dst != src) emitStore(dst, regT1, regT0); - map(m_bytecodeOffset + OPCODE_LENGTH(op_to_primitive), dst, regT1, regT0); } void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector::iterator& iter) { - int dst = currentInstruction[1].u.operand; - linkSlowCase(iter); - JITStubCall stubCall(this, cti_op_to_primitive); - stubCall.addArgument(regT1, regT0); - stubCall.call(dst); + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_primitive); + slowPathCall.call(); } void JIT::emit_op_strcat(Instruction* currentInstruction) { - JITStubCall stubCall(this, cti_op_strcat); - stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand)); - stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand)); - stubCall.call(currentInstruction[1].u.operand); + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_strcat); + slowPathCall.call(); } void JIT::emit_op_not(Instruction* currentInstruction) { - unsigned dst = currentInstruction[1].u.operand; - unsigned src = currentInstruction[2].u.operand; + int dst = currentInstruction[1].u.operand; + int src = currentInstruction[2].u.operand; emitLoadTag(src, regT0); @@ -462,19 +395,15 @@ void JIT::emit_op_not(Instruction* currentInstruction) void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector::iterator& iter) { - unsigned dst = currentInstruction[1].u.operand; - unsigned src = currentInstruction[2].u.operand; - linkSlowCase(iter); - JITStubCall stubCall(this, cti_op_not); - stubCall.addArgument(src); - stubCall.call(dst); + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_not); + slowPathCall.call(); } void JIT::emit_op_jfalse(Instruction* currentInstruction) { - unsigned cond = currentInstruction[1].u.operand; + int cond = currentInstruction[1].u.operand; unsigned target = currentInstruction[2].u.operand; emitLoad(cond, regT1, regT0); @@ -486,7 +415,7 @@ void JIT::emit_op_jfalse(Instruction* currentInstruction) void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector::iterator& iter) { - unsigned cond = currentInstruction[1].u.operand; + int cond = currentInstruction[1].u.operand; unsigned target = currentInstruction[2].u.operand; linkSlowCase(iter); @@ -502,15 +431,13 @@ void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector::iterator& iter) { - unsigned cond = currentInstruction[1].u.operand; + int cond = currentInstruction[1].u.operand; unsigned target = currentInstruction[2].u.operand; 
linkSlowCase(iter); @@ -538,24 +465,21 @@ void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, VectorglobalObject()), regT0); addJump(branchPtr(Equal, Address(regT2, Structure::globalObjectOffset()), regT0), target); Jump masqueradesGlobalObjectIsForeign = jump(); @@ -572,16 +496,15 @@ void JIT::emit_op_jeq_null(Instruction* currentInstruction) void JIT::emit_op_jneq_null(Instruction* currentInstruction) { - unsigned src = currentInstruction[1].u.operand; + int src = currentInstruction[1].u.operand; unsigned target = currentInstruction[2].u.operand; emitLoad(src, regT1, regT0); Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)); - // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure. - loadPtr(Address(regT0, JSCell::structureOffset()), regT2); - addJump(branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target); + addJump(branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target); + loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2); move(TrustedImmPtr(m_codeBlock->globalObject()), regT0); addJump(branchPtr(NotEqual, Address(regT2, Structure::globalObjectOffset()), regT0), target); Jump wasNotImmediate = jump(); @@ -598,7 +521,7 @@ void JIT::emit_op_jneq_null(Instruction* currentInstruction) void JIT::emit_op_jneq_ptr(Instruction* currentInstruction) { - unsigned src = currentInstruction[1].u.operand; + int src = currentInstruction[1].u.operand; Special::Pointer ptr = currentInstruction[2].u.specialPointer; unsigned target = currentInstruction[3].u.operand; @@ -609,9 +532,9 @@ void JIT::emit_op_jneq_ptr(Instruction* currentInstruction) void JIT::emit_op_eq(Instruction* currentInstruction) { - unsigned dst = currentInstruction[1].u.operand; - unsigned src1 = currentInstruction[2].u.operand; - unsigned src2 = currentInstruction[3].u.operand; + int dst = currentInstruction[1].u.operand; + int src1 = currentInstruction[2].u.operand; + int src2 = currentInstruction[3].u.operand; emitLoad2(src1, regT1, regT0, src2, regT3, regT2); addSlowCase(branch32(NotEqual, regT1, regT3)); @@ -625,9 +548,9 @@ void JIT::emit_op_eq(Instruction* currentInstruction) void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector::iterator& iter) { - unsigned dst = currentInstruction[1].u.operand; - unsigned op1 = currentInstruction[2].u.operand; - unsigned op2 = currentInstruction[3].u.operand; + int dst = currentInstruction[1].u.operand; + int op1 = currentInstruction[2].u.operand; + int op2 = currentInstruction[3].u.operand; JumpList storeResult; JumpList genericCase; @@ -635,33 +558,29 @@ void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector: genericCase.append(getSlowCase(iter)); // tags not equal linkSlowCase(iter); // tags equal and JSCell - genericCase.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get()))); - genericCase.append(branchPtr(NotEqual, Address(regT2, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get()))); + genericCase.append(branchPtr(NotEqual, Address(regT0, JSCell::structureIDOffset()), TrustedImmPtr(m_vm->stringStructure.get()))); + genericCase.append(branchPtr(NotEqual, Address(regT2, JSCell::structureIDOffset()), TrustedImmPtr(m_vm->stringStructure.get()))); // String case. 
- JITStubCall stubCallEqStrings(this, cti_op_eq_strings); - stubCallEqStrings.addArgument(regT0); - stubCallEqStrings.addArgument(regT2); - stubCallEqStrings.call(); + callOperation(operationCompareStringEq, regT0, regT2); storeResult.append(jump()); // Generic case. genericCase.append(getSlowCase(iter)); // doubles genericCase.link(this); - JITStubCall stubCallEq(this, cti_op_eq); - stubCallEq.addArgument(op1); - stubCallEq.addArgument(op2); - stubCallEq.call(regT0); + emitLoad(op1, regT1, regT0); + emitLoad(op2, regT3, regT2); + callOperation(operationCompareEq, regT1, regT0, regT3, regT2); storeResult.link(this); - emitStoreBool(dst, regT0); + emitStoreBool(dst, returnValueGPR); } void JIT::emit_op_neq(Instruction* currentInstruction) { - unsigned dst = currentInstruction[1].u.operand; - unsigned src1 = currentInstruction[2].u.operand; - unsigned src2 = currentInstruction[3].u.operand; + int dst = currentInstruction[1].u.operand; + int src1 = currentInstruction[2].u.operand; + int src2 = currentInstruction[3].u.operand; emitLoad2(src1, regT1, regT0, src2, regT3, regT2); addSlowCase(branch32(NotEqual, regT1, regT3)); @@ -675,7 +594,7 @@ void JIT::emit_op_neq(Instruction* currentInstruction) void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector::iterator& iter) { - unsigned dst = currentInstruction[1].u.operand; + int dst = currentInstruction[1].u.operand; JumpList storeResult; JumpList genericCase; @@ -683,34 +602,28 @@ void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector genericCase.append(getSlowCase(iter)); // tags not equal linkSlowCase(iter); // tags equal and JSCell - genericCase.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get()))); - genericCase.append(branchPtr(NotEqual, Address(regT2, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get()))); + genericCase.append(branchPtr(NotEqual, Address(regT0, JSCell::structureIDOffset()), TrustedImmPtr(m_vm->stringStructure.get()))); + genericCase.append(branchPtr(NotEqual, Address(regT2, JSCell::structureIDOffset()), TrustedImmPtr(m_vm->stringStructure.get()))); // String case. - JITStubCall stubCallEqStrings(this, cti_op_eq_strings); - stubCallEqStrings.addArgument(regT0); - stubCallEqStrings.addArgument(regT2); - stubCallEqStrings.call(regT0); + callOperation(operationCompareStringEq, regT0, regT2); storeResult.append(jump()); // Generic case. 
genericCase.append(getSlowCase(iter)); // doubles genericCase.link(this); - JITStubCall stubCallEq(this, cti_op_eq); - stubCallEq.addArgument(regT1, regT0); - stubCallEq.addArgument(regT3, regT2); - stubCallEq.call(regT0); + callOperation(operationCompareEq, regT1, regT0, regT3, regT2); storeResult.link(this); - xor32(TrustedImm32(0x1), regT0); - emitStoreBool(dst, regT0); + xor32(TrustedImm32(0x1), returnValueGPR); + emitStoreBool(dst, returnValueGPR); } void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type) { - unsigned dst = currentInstruction[1].u.operand; - unsigned src1 = currentInstruction[2].u.operand; - unsigned src2 = currentInstruction[3].u.operand; + int dst = currentInstruction[1].u.operand; + int src1 = currentInstruction[2].u.operand; + int src2 = currentInstruction[3].u.operand; emitLoad2(src1, regT1, regT0, src2, regT3, regT2); @@ -718,12 +631,12 @@ void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqTy addSlowCase(branch32(NotEqual, regT1, regT3)); addSlowCase(branch32(Below, regT1, TrustedImm32(JSValue::LowestTag))); - // Jump to a slow case if both are strings. + // Jump to a slow case if both are strings or symbols (non object). Jump notCell = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)); - Jump firstNotString = branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())); - addSlowCase(branchPtr(Equal, Address(regT2, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get()))); + Jump firstIsObject = emitJumpIfCellObject(regT0); + addSlowCase(emitJumpIfCellNotObject(regT2)); notCell.link(this); - firstNotString.link(this); + firstIsObject.link(this); // Simply compare the payloads. if (type == OpStrictEq) @@ -741,18 +654,12 @@ void JIT::emit_op_stricteq(Instruction* currentInstruction) void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector::iterator& iter) { - unsigned dst = currentInstruction[1].u.operand; - unsigned src1 = currentInstruction[2].u.operand; - unsigned src2 = currentInstruction[3].u.operand; - linkSlowCase(iter); linkSlowCase(iter); linkSlowCase(iter); - JITStubCall stubCall(this, cti_op_stricteq); - stubCall.addArgument(src1); - stubCall.addArgument(src2); - stubCall.call(dst); + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_stricteq); + slowPathCall.call(); } void JIT::emit_op_nstricteq(Instruction* currentInstruction) @@ -762,34 +669,28 @@ void JIT::emit_op_nstricteq(Instruction* currentInstruction) void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector::iterator& iter) { - unsigned dst = currentInstruction[1].u.operand; - unsigned src1 = currentInstruction[2].u.operand; - unsigned src2 = currentInstruction[3].u.operand; - linkSlowCase(iter); linkSlowCase(iter); linkSlowCase(iter); - JITStubCall stubCall(this, cti_op_nstricteq); - stubCall.addArgument(src1); - stubCall.addArgument(src2); - stubCall.call(dst); + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_nstricteq); + slowPathCall.call(); } void JIT::emit_op_eq_null(Instruction* currentInstruction) { - unsigned dst = currentInstruction[1].u.operand; - unsigned src = currentInstruction[2].u.operand; + int dst = currentInstruction[1].u.operand; + int src = currentInstruction[2].u.operand; emitLoad(src, regT1, regT0); Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)); - loadPtr(Address(regT0, JSCell::structureOffset()), regT2); - Jump isMasqueradesAsUndefined = 
branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); + Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); move(TrustedImm32(0), regT1); Jump wasNotMasqueradesAsUndefined = jump(); isMasqueradesAsUndefined.link(this); + loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2); move(TrustedImmPtr(m_codeBlock->globalObject()), regT0); loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2); compare32(Equal, regT0, regT2, regT1); @@ -809,18 +710,18 @@ void JIT::emit_op_eq_null(Instruction* currentInstruction) void JIT::emit_op_neq_null(Instruction* currentInstruction) { - unsigned dst = currentInstruction[1].u.operand; - unsigned src = currentInstruction[2].u.operand; + int dst = currentInstruction[1].u.operand; + int src = currentInstruction[2].u.operand; emitLoad(src, regT1, regT0); Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)); - loadPtr(Address(regT0, JSCell::structureOffset()), regT2); - Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); + Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); move(TrustedImm32(1), regT1); Jump wasNotMasqueradesAsUndefined = jump(); isMasqueradesAsUndefined.link(this); + loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2); move(TrustedImmPtr(m_codeBlock->globalObject()), regT0); loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2); compare32(NotEqual, regT0, regT2, regT1); @@ -840,229 +741,145 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction) void JIT::emit_op_throw(Instruction* currentInstruction) { - unsigned exception = currentInstruction[1].u.operand; - JITStubCall stubCall(this, cti_op_throw); - stubCall.addArgument(exception); - stubCall.call(); - -#ifndef NDEBUG - // cti_op_throw always changes it's return address, - // this point in the code should never be reached. - breakpoint(); -#endif + ASSERT(regT0 == returnValueGPR); + emitLoad(currentInstruction[1].u.operand, regT1, regT0); + callOperationNoExceptionCheck(operationThrow, regT1, regT0); + jumpToExceptionHandler(); } -void JIT::emit_op_get_pnames(Instruction* currentInstruction) +void JIT::emit_op_push_with_scope(Instruction* currentInstruction) { int dst = currentInstruction[1].u.operand; - int base = currentInstruction[2].u.operand; - int i = currentInstruction[3].u.operand; - int size = currentInstruction[4].u.operand; - int breakTarget = currentInstruction[5].u.operand; - - JumpList isNotObject; - - emitLoad(base, regT1, regT0); - if (!m_codeBlock->isKnownNotImmediate(base)) - isNotObject.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag))); - if (base != m_codeBlock->thisRegister() || m_codeBlock->isStrictMode()) { - loadPtr(Address(regT0, JSCell::structureOffset()), regT2); - isNotObject.append(emitJumpIfNotObject(regT2)); - } - - // We could inline the case where you have a valid cache, but - // this call doesn't seem to be hot. 
- Label isObject(this); - JITStubCall getPnamesStubCall(this, cti_op_get_pnames); - getPnamesStubCall.addArgument(regT0); - getPnamesStubCall.call(dst); - load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3); - store32(TrustedImm32(Int32Tag), intTagFor(i)); - store32(TrustedImm32(0), intPayloadFor(i)); - store32(TrustedImm32(Int32Tag), intTagFor(size)); - store32(regT3, payloadFor(size)); - Jump end = jump(); - - isNotObject.link(this); - addJump(branch32(Equal, regT1, TrustedImm32(JSValue::NullTag)), breakTarget); - addJump(branch32(Equal, regT1, TrustedImm32(JSValue::UndefinedTag)), breakTarget); - JITStubCall toObjectStubCall(this, cti_to_object); - toObjectStubCall.addArgument(regT1, regT0); - toObjectStubCall.call(base); - jump().linkTo(isObject, this); + emitLoad(currentInstruction[2].u.operand, regT1, regT0); + callOperation(operationPushWithScope, dst, regT1, regT0); +} - end.link(this); +void JIT::emit_op_pop_scope(Instruction* currentInstruction) +{ + int scope = currentInstruction[1].u.operand; + callOperation(operationPopScope, scope); } -void JIT::emit_op_next_pname(Instruction* currentInstruction) +void JIT::emit_op_to_number(Instruction* currentInstruction) { int dst = currentInstruction[1].u.operand; - int base = currentInstruction[2].u.operand; - int i = currentInstruction[3].u.operand; - int size = currentInstruction[4].u.operand; - int it = currentInstruction[5].u.operand; - int target = currentInstruction[6].u.operand; - - JumpList callHasProperty; - - Label begin(this); - load32(intPayloadFor(i), regT0); - Jump end = branch32(Equal, regT0, intPayloadFor(size)); - - // Grab key @ i - loadPtr(payloadFor(it), regT1); - loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2); - load32(BaseIndex(regT2, regT0, TimesEight), regT2); - store32(TrustedImm32(JSValue::CellTag), tagFor(dst)); - store32(regT2, payloadFor(dst)); - - // Increment i - add32(TrustedImm32(1), regT0); - store32(regT0, intPayloadFor(i)); - - // Verify that i is valid: - loadPtr(payloadFor(base), regT0); - - // Test base's structure - loadPtr(Address(regT0, JSCell::structureOffset()), regT2); - callHasProperty.append(branchPtr(NotEqual, regT2, Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))))); - - // Test base's prototype chain - loadPtr(Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedPrototypeChain))), regT3); - loadPtr(Address(regT3, OBJECT_OFFSETOF(StructureChain, m_vector)), regT3); - addJump(branchTestPtr(Zero, Address(regT3)), target); - - Label checkPrototype(this); - callHasProperty.append(branch32(Equal, Address(regT2, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::NullTag))); - loadPtr(Address(regT2, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2); - loadPtr(Address(regT2, JSCell::structureOffset()), regT2); - callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3))); - addPtr(TrustedImm32(sizeof(Structure*)), regT3); - branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this); - - // Continue loop. - addJump(jump(), target); + int src = currentInstruction[2].u.operand; - // Slow case: Ask the object if i is valid. - callHasProperty.link(this); - loadPtr(addressFor(dst), regT1); - JITStubCall stubCall(this, cti_has_property); - stubCall.addArgument(regT0); - stubCall.addArgument(regT1); - stubCall.call(); + emitLoad(src, regT1, regT0); - // Test for valid key. 
- addJump(branchTest32(NonZero, regT0), target); - jump().linkTo(begin, this); + Jump isInt32 = branch32(Equal, regT1, TrustedImm32(JSValue::Int32Tag)); + addSlowCase(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag))); + isInt32.link(this); - // End of loop. - end.link(this); + if (src != dst) + emitStore(dst, regT1, regT0); } -void JIT::emit_op_push_with_scope(Instruction* currentInstruction) +void JIT::emitSlow_op_to_number(Instruction* currentInstruction, Vector::iterator& iter) { - JITStubCall stubCall(this, cti_op_push_with_scope); - stubCall.addArgument(currentInstruction[1].u.operand); - stubCall.call(); -} + linkSlowCase(iter); -void JIT::emit_op_pop_scope(Instruction*) -{ - JITStubCall(this, cti_op_pop_scope).call(); + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_number); + slowPathCall.call(); } -void JIT::emit_op_to_number(Instruction* currentInstruction) +void JIT::emit_op_to_string(Instruction* currentInstruction) { int dst = currentInstruction[1].u.operand; int src = currentInstruction[2].u.operand; emitLoad(src, regT1, regT0); - Jump isInt32 = branch32(Equal, regT1, TrustedImm32(JSValue::Int32Tag)); - addSlowCase(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag))); - isInt32.link(this); + addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag))); + addSlowCase(branch8(NotEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType))); if (src != dst) emitStore(dst, regT1, regT0); - map(m_bytecodeOffset + OPCODE_LENGTH(op_to_number), dst, regT1, regT0); } -void JIT::emitSlow_op_to_number(Instruction* currentInstruction, Vector::iterator& iter) +void JIT::emitSlow_op_to_string(Instruction* currentInstruction, Vector::iterator& iter) { - int dst = currentInstruction[1].u.operand; + linkSlowCase(iter); // Not JSCell. + linkSlowCase(iter); // Not JSString. - linkSlowCase(iter); - - JITStubCall stubCall(this, cti_op_to_number); - stubCall.addArgument(regT1, regT0); - stubCall.call(dst); + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_string); + slowPathCall.call(); } void JIT::emit_op_push_name_scope(Instruction* currentInstruction) { - JITStubCall stubCall(this, cti_op_push_name_scope); - stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[1].u.operand))); - stubCall.addArgument(currentInstruction[2].u.operand); - stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand)); - stubCall.call(); + int dst = currentInstruction[1].u.operand; + emitLoad(currentInstruction[2].u.operand, regT1, regT0); + if (currentInstruction[4].u.operand == JSNameScope::CatchScope) { + callOperation(operationPushCatchScope, dst, jsCast(getConstantOperand(currentInstruction[3].u.operand)), regT1, regT0); + return; + } + + RELEASE_ASSERT(currentInstruction[4].u.operand == JSNameScope::FunctionNameScope); + callOperation(operationPushFunctionNameScope, dst, jsCast(getConstantOperand(currentInstruction[3].u.operand)), regT1, regT0); } void JIT::emit_op_catch(Instruction* currentInstruction) { - // cti_op_throw returns the callFrame for the handler. - move(regT0, callFrameRegister); + move(TrustedImmPtr(m_vm), regT3); + // operationThrow returns the callFrame for the handler. 
+ load32(Address(regT3, VM::callFrameForThrowOffset()), callFrameRegister); + load32(Address(regT3, VM::vmEntryFrameForThrowOffset()), regT0); + store32(regT0, Address(regT3, VM::topVMEntryFrameOffset())); + + addPtr(TrustedImm32(stackPointerOffsetFor(codeBlock()) * sizeof(Register)), callFrameRegister, stackPointerRegister); + + // Now store the exception returned by operationThrow. + load32(Address(regT3, VM::exceptionOffset()), regT2); + move(TrustedImm32(JSValue::CellTag), regT1); - // Now store the exception returned by cti_op_throw. - loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(struct JITStackFrame, vm)), regT3); - load32(Address(regT3, OBJECT_OFFSETOF(VM, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); - load32(Address(regT3, OBJECT_OFFSETOF(VM, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); - store32(TrustedImm32(JSValue().payload()), Address(regT3, OBJECT_OFFSETOF(VM, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); - store32(TrustedImm32(JSValue().tag()), Address(regT3, OBJECT_OFFSETOF(VM, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); + store32(TrustedImm32(0), Address(regT3, VM::exceptionOffset())); unsigned exception = currentInstruction[1].u.operand; - emitStore(exception, regT1, regT0); - map(m_bytecodeOffset + OPCODE_LENGTH(op_catch), exception, regT1, regT0); + emitStore(exception, regT1, regT2); + + load32(Address(regT2, Exception::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); + load32(Address(regT2, Exception::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); + + unsigned thrownValue = currentInstruction[2].u.operand; + emitStore(thrownValue, regT1, regT0); } void JIT::emit_op_switch_imm(Instruction* currentInstruction) { - unsigned tableIndex = currentInstruction[1].u.operand; + size_t tableIndex = currentInstruction[1].u.operand; unsigned defaultOffset = currentInstruction[2].u.operand; unsigned scrutinee = currentInstruction[3].u.operand; // create jump table for switch destinations, track this switch statement. - SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex); + SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex); m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Immediate)); - jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size()); + jumpTable->ensureCTITable(); - JITStubCall stubCall(this, cti_op_switch_imm); - stubCall.addArgument(scrutinee); - stubCall.addArgument(TrustedImm32(tableIndex)); - stubCall.call(); - jump(regT0); + emitLoad(scrutinee, regT1, regT0); + callOperation(operationSwitchImmWithUnknownKeyType, regT1, regT0, tableIndex); + jump(returnValueGPR); } void JIT::emit_op_switch_char(Instruction* currentInstruction) { - unsigned tableIndex = currentInstruction[1].u.operand; + size_t tableIndex = currentInstruction[1].u.operand; unsigned defaultOffset = currentInstruction[2].u.operand; unsigned scrutinee = currentInstruction[3].u.operand; // create jump table for switch destinations, track this switch statement. 
- SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex); + SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex); m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Character)); - jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size()); + jumpTable->ensureCTITable(); - JITStubCall stubCall(this, cti_op_switch_char); - stubCall.addArgument(scrutinee); - stubCall.addArgument(TrustedImm32(tableIndex)); - stubCall.call(); - jump(regT0); + emitLoad(scrutinee, regT1, regT0); + callOperation(operationSwitchCharWithUnknownKeyType, regT1, regT0, tableIndex); + jump(returnValueGPR); } void JIT::emit_op_switch_string(Instruction* currentInstruction) { - unsigned tableIndex = currentInstruction[1].u.operand; + size_t tableIndex = currentInstruction[1].u.operand; unsigned defaultOffset = currentInstruction[2].u.operand; unsigned scrutinee = currentInstruction[3].u.operand; @@ -1070,40 +887,27 @@ void JIT::emit_op_switch_string(Instruction* currentInstruction) StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex); m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset)); - JITStubCall stubCall(this, cti_op_switch_string); - stubCall.addArgument(scrutinee); - stubCall.addArgument(TrustedImm32(tableIndex)); - stubCall.call(); - jump(regT0); + emitLoad(scrutinee, regT1, regT0); + callOperation(operationSwitchStringWithUnknownKeyType, regT1, regT0, tableIndex); + jump(returnValueGPR); } void JIT::emit_op_throw_static_error(Instruction* currentInstruction) { - unsigned message = currentInstruction[1].u.operand; - - JITStubCall stubCall(this, cti_op_throw_static_error); - stubCall.addArgument(m_codeBlock->getConstant(message)); - stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand)); - stubCall.call(); + emitLoad(m_codeBlock->getConstant(currentInstruction[1].u.operand), regT1, regT0); + callOperation(operationThrowStaticError, regT1, regT0, currentInstruction[2].u.operand); } void JIT::emit_op_debug(Instruction* currentInstruction) { -#if ENABLE(DEBUG_WITH_BREAKPOINT) - UNUSED_PARAM(currentInstruction); - breakpoint(); -#else - JITStubCall stubCall(this, cti_op_debug); - stubCall.addArgument(Imm32(currentInstruction[1].u.operand)); - stubCall.addArgument(Imm32(currentInstruction[2].u.operand)); - stubCall.addArgument(Imm32(currentInstruction[3].u.operand)); - stubCall.addArgument(Imm32(currentInstruction[4].u.operand)); - stubCall.call(); -#endif + load32(codeBlock()->debuggerRequestsAddress(), regT0); + Jump noDebuggerRequests = branchTest32(Zero, regT0); + callOperation(operationDebug, currentInstruction[1].u.operand); + noDebuggerRequests.link(this); } -void JIT::emit_op_enter(Instruction*) +void JIT::emit_op_enter(Instruction* currentInstruction) { emitEnterOptimizationCheck(); @@ -1111,261 +915,394 @@ void JIT::emit_op_enter(Instruction*) // registers to zap stale pointers, to avoid unnecessarily prolonging // object lifetime and increasing GC pressure. 
for (int i = 0; i < m_codeBlock->m_numVars; ++i) - emitStore(i, jsUndefined()); -} - -void JIT::emit_op_create_activation(Instruction* currentInstruction) -{ - unsigned activation = currentInstruction[1].u.operand; - - Jump activationCreated = branch32(NotEqual, tagFor(activation), TrustedImm32(JSValue::EmptyValueTag)); - JITStubCall(this, cti_op_push_activation).call(activation); - activationCreated.link(this); -} - -void JIT::emit_op_create_arguments(Instruction* currentInstruction) -{ - unsigned dst = currentInstruction[1].u.operand; + emitStore(virtualRegisterForLocal(i).offset(), jsUndefined()); - Jump argsCreated = branch32(NotEqual, tagFor(dst), TrustedImm32(JSValue::EmptyValueTag)); - - JITStubCall(this, cti_op_create_arguments).call(); - emitStore(dst, regT1, regT0); - emitStore(unmodifiedArgumentsRegister(dst), regT1, regT0); - - argsCreated.link(this); + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_enter); + slowPathCall.call(); } -void JIT::emit_op_init_lazy_reg(Instruction* currentInstruction) +void JIT::emit_op_create_lexical_environment(Instruction* currentInstruction) { - unsigned dst = currentInstruction[1].u.operand; + int lexicalEnvironment = currentInstruction[1].u.operand; + int scope = currentInstruction[2].u.operand; - emitStore(dst, JSValue()); + emitLoadPayload(currentInstruction[2].u.operand, regT0); + callOperation(operationCreateActivation, regT0); + emitStoreCell(lexicalEnvironment, returnValueGPR); + emitStoreCell(scope, returnValueGPR); } -void JIT::emit_op_get_callee(Instruction* currentInstruction) +void JIT::emit_op_get_scope(Instruction* currentInstruction) { int dst = currentInstruction[1].u.operand; emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT0); - move(TrustedImm32(JSValue::CellTag), regT1); - emitValueProfilingSite(); - emitStore(dst, regT1, regT0); + loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT0); + emitStoreCell(dst, regT0); } void JIT::emit_op_create_this(Instruction* currentInstruction) { int callee = currentInstruction[2].u.operand; + WriteBarrierBase* cachedFunction = ¤tInstruction[4].u.jsCell; RegisterID calleeReg = regT0; + RegisterID rareDataReg = regT4; RegisterID resultReg = regT0; RegisterID allocatorReg = regT1; RegisterID structureReg = regT2; + RegisterID cachedFunctionReg = regT4; RegisterID scratchReg = regT3; emitLoadPayload(callee, calleeReg); - loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg); - loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg); + loadPtr(Address(calleeReg, JSFunction::offsetOfRareData()), rareDataReg); + addSlowCase(branchTestPtr(Zero, rareDataReg)); + loadPtr(Address(rareDataReg, FunctionRareData::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg); + loadPtr(Address(rareDataReg, FunctionRareData::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg); addSlowCase(branchTestPtr(Zero, allocatorReg)); + loadPtr(cachedFunction, cachedFunctionReg); + Jump hasSeenMultipleCallees = branchPtr(Equal, cachedFunctionReg, TrustedImmPtr(JSCell::seenMultipleCalleeObjects())); + addSlowCase(branchPtr(NotEqual, calleeReg, cachedFunctionReg)); + hasSeenMultipleCallees.link(this); + emitAllocateJSObject(allocatorReg, structureReg, resultReg, scratchReg); emitStoreCell(currentInstruction[1].u.operand, resultReg); } void 
JIT::emitSlow_op_create_this(Instruction* currentInstruction, Vector::iterator& iter) { + linkSlowCase(iter); // doesn't have rare data linkSlowCase(iter); // doesn't have an allocation profile linkSlowCase(iter); // allocation failed + linkSlowCase(iter); // cached function didn't match - JITStubCall stubCall(this, cti_op_create_this); - stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand)); - stubCall.call(currentInstruction[1].u.operand); + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_this); + slowPathCall.call(); } -void JIT::emit_op_convert_this(Instruction* currentInstruction) +void JIT::emit_op_to_this(Instruction* currentInstruction) { - unsigned thisRegister = currentInstruction[1].u.operand; + WriteBarrierBase* cachedStructure = ¤tInstruction[2].u.structure; + int thisRegister = currentInstruction[1].u.operand; emitLoad(thisRegister, regT3, regT2); addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::CellTag))); - if (shouldEmitProfiling()) { - loadPtr(Address(regT2, JSCell::structureOffset()), regT0); - move(regT3, regT1); - emitValueProfilingSite(); - } - addSlowCase(branchPtr(Equal, Address(regT2, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get()))); + addSlowCase(branch8(NotEqual, Address(regT2, JSCell::typeInfoTypeOffset()), TrustedImm32(FinalObjectType))); + loadPtr(Address(regT2, JSCell::structureIDOffset()), regT0); + loadPtr(cachedStructure, regT2); + addSlowCase(branchPtr(NotEqual, regT0, regT2)); } -void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector::iterator& iter) +void JIT::emitSlow_op_to_this(Instruction* currentInstruction, Vector::iterator& iter) { - void* globalThis = m_codeBlock->globalObject()->globalThis(); - unsigned thisRegister = currentInstruction[1].u.operand; - linkSlowCase(iter); - if (shouldEmitProfiling()) { - move(TrustedImm32(JSValue::UndefinedTag), regT1); - move(TrustedImm32(0), regT0); - } - Jump isNotUndefined = branch32(NotEqual, regT3, TrustedImm32(JSValue::UndefinedTag)); - emitValueProfilingSite(); - move(TrustedImmPtr(globalThis), regT0); - move(TrustedImm32(JSValue::CellTag), regT1); - emitStore(thisRegister, regT1, regT0); - emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_convert_this)); + linkSlowCase(iter); + linkSlowCase(iter); + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_this); + slowPathCall.call(); +} + +void JIT::emit_op_check_tdz(Instruction* currentInstruction) +{ + emitLoadTag(currentInstruction[1].u.operand, regT0); + addSlowCase(branch32(Equal, regT0, TrustedImm32(JSValue::EmptyValueTag))); +} +void JIT::emitSlow_op_check_tdz(Instruction* currentInstruction, Vector::iterator& iter) +{ linkSlowCase(iter); - if (shouldEmitProfiling()) { - move(TrustedImm32(JSValue::CellTag), regT1); - move(TrustedImmPtr(m_vm->stringStructure.get()), regT0); - } - isNotUndefined.link(this); - emitValueProfilingSite(); - JITStubCall stubCall(this, cti_op_convert_this); - stubCall.addArgument(regT3, regT2); - stubCall.call(thisRegister); + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_throw_tdz_error); + slowPathCall.call(); } void JIT::emit_op_profile_will_call(Instruction* currentInstruction) { - JITStubCall stubCall(this, cti_op_profile_will_call); - stubCall.addArgument(currentInstruction[1].u.operand); - stubCall.call(); + load32(m_vm->enabledProfilerAddress(), regT0); + Jump profilerDone = branchTestPtr(Zero, regT0); + emitLoad(currentInstruction[1].u.operand, regT1, regT0); + 
callOperation(operationProfileWillCall, regT1, regT0); + profilerDone.link(this); } void JIT::emit_op_profile_did_call(Instruction* currentInstruction) { - JITStubCall stubCall(this, cti_op_profile_did_call); - stubCall.addArgument(currentInstruction[1].u.operand); - stubCall.call(); + load32(m_vm->enabledProfilerAddress(), regT0); + Jump profilerDone = branchTestPtr(Zero, regT0); + emitLoad(currentInstruction[1].u.operand, regT1, regT0); + callOperation(operationProfileDidCall, regT1, regT0); + profilerDone.link(this); } -void JIT::emit_op_get_arguments_length(Instruction* currentInstruction) +void JIT::emit_op_has_structure_property(Instruction* currentInstruction) { int dst = currentInstruction[1].u.operand; - int argumentsRegister = currentInstruction[2].u.operand; - addSlowCase(branch32(NotEqual, tagFor(argumentsRegister), TrustedImm32(JSValue::EmptyValueTag))); - load32(payloadFor(JSStack::ArgumentCount), regT0); - sub32(TrustedImm32(1), regT0); - emitStoreInt32(dst, regT0); + int base = currentInstruction[2].u.operand; + int enumerator = currentInstruction[4].u.operand; + + emitLoadPayload(base, regT0); + emitJumpSlowCaseIfNotJSCell(base); + + emitLoadPayload(enumerator, regT1); + + load32(Address(regT0, JSCell::structureIDOffset()), regT0); + addSlowCase(branch32(NotEqual, regT0, Address(regT1, JSPropertyNameEnumerator::cachedStructureIDOffset()))); + + move(TrustedImm32(1), regT0); + emitStoreBool(dst, regT0); } -void JIT::emitSlow_op_get_arguments_length(Instruction* currentInstruction, Vector::iterator& iter) +void JIT::privateCompileHasIndexedProperty(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) +{ + Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex; + + PatchableJump badType; + + // FIXME: Add support for other types like TypedArrays and Arguments. + // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034. 
+ JumpList slowCases = emitLoadForArrayMode(currentInstruction, arrayMode, badType); + move(TrustedImm32(1), regT0); + Jump done = jump(); + + LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock); + + patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath)); + patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath)); + + patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone)); + + byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB( + m_codeBlock, patchBuffer, + ("Baseline has_indexed_property stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value())); + + RepatchBuffer repatchBuffer(m_codeBlock); + repatchBuffer.relink(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code())); + repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(operationHasIndexedPropertyGeneric)); +} + +void JIT::emit_op_has_indexed_property(Instruction* currentInstruction) { - linkSlowCase(iter); int dst = currentInstruction[1].u.operand; int base = currentInstruction[2].u.operand; - int ident = currentInstruction[3].u.operand; + int property = currentInstruction[3].u.operand; + ArrayProfile* profile = currentInstruction[4].u.arrayProfile; + + emitLoadPayload(base, regT0); + emitJumpSlowCaseIfNotJSCell(base); + + emitLoadPayload(property, regT1); + + // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter. + // We check the value as if it was a uint32 against the m_vectorLength - which will always fail if + // number was signed since m_vectorLength is always less than intmax (since the total allocation + // size is always less than 4Gb). As such zero extending will have been correct (and extending the value + // to 64-bits is necessary since it's used in the address calculation. We zero extend rather than sign + // extending since it makes it easier to re-tag the value in the slow case. + zeroExtend32ToPtr(regT1, regT1); + + emitArrayProfilingSiteWithCell(regT0, regT2, profile); + and32(TrustedImm32(IndexingShapeMask), regT2); + + JITArrayMode mode = chooseArrayMode(profile); + PatchableJump badType; + + // FIXME: Add support for other types like TypedArrays and Arguments. + // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034. 
+    JumpList slowCases = emitLoadForArrayMode(currentInstruction, mode, badType);
+    move(TrustedImm32(1), regT0);
+
+    addSlowCase(badType);
+    addSlowCase(slowCases);
+
+    Label done = label();
+
+    emitStoreBool(dst, regT0);
-    JITStubCall stubCall(this, cti_op_get_by_id_generic);
-    stubCall.addArgument(base);
-    stubCall.addArgument(TrustedImmPtr(&(m_codeBlock->identifier(ident))));
-    stubCall.call(dst);
+    m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
 }
 
-void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
+void JIT::emitSlow_op_has_indexed_property(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     int dst = currentInstruction[1].u.operand;
-    int argumentsRegister = currentInstruction[2].u.operand;
+    int base = currentInstruction[2].u.operand;
     int property = currentInstruction[3].u.operand;
-    addSlowCase(branch32(NotEqual, tagFor(argumentsRegister), TrustedImm32(JSValue::EmptyValueTag)));
-    emitLoad(property, regT1, regT2);
-    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
-    add32(TrustedImm32(1), regT2);
-    // regT2 now contains the integer index of the argument we want, including this
-    load32(payloadFor(JSStack::ArgumentCount), regT3);
-    addSlowCase(branch32(AboveOrEqual, regT2, regT3));
+    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+
+    linkSlowCaseIfNotJSCell(iter, base); // base cell check
+    linkSlowCase(iter); // base array check
+    linkSlowCase(iter); // vector length check
+    linkSlowCase(iter); // empty value
+
+    Label slowPath = label();
+    emitLoad(base, regT1, regT0);
+    emitLoad(property, regT3, regT2);
+    Call call = callOperation(operationHasIndexedPropertyDefault, dst, regT1, regT0, regT3, regT2, profile);
+
+    m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
+    m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
+    m_byValInstructionIndex++;
+}
+
+void JIT::emit_op_get_direct_pname(Instruction* currentInstruction)
+{
+    int dst = currentInstruction[1].u.operand;
+    int base = currentInstruction[2].u.operand;
+    int index = currentInstruction[4].u.operand;
+    int enumerator = currentInstruction[5].u.operand;
+
+    // Check that base is a cell
+    emitLoadPayload(base, regT0);
+    emitJumpSlowCaseIfNotJSCell(base);
+
+    // Check the structure
+    emitLoadPayload(enumerator, regT1);
+    load32(Address(regT0, JSCell::structureIDOffset()), regT2);
+    addSlowCase(branch32(NotEqual, regT2, Address(regT1, JSPropertyNameEnumerator::cachedStructureIDOffset())));
+
+    // Compute the offset
+    emitLoadPayload(index, regT2);
+    // If index is less than the enumerator's cached inline storage, then it's an inline access
+    Jump outOfLineAccess = branch32(AboveOrEqual, regT2, Address(regT1, JSPropertyNameEnumerator::cachedInlineCapacityOffset()));
+    addPtr(TrustedImm32(JSObject::offsetOfInlineStorage()), regT0);
+    load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
+    load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
+
+    Jump done = jump();
+
+    // Otherwise it's out of line
+    outOfLineAccess.link(this);
+    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
+    sub32(Address(regT1, JSPropertyNameEnumerator::cachedInlineCapacityOffset()), regT2);
     neg32(regT2);
-    loadPtr(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT0);
-    loadPtr(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT1);
+    int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue);
+    load32(BaseIndex(regT0, regT2, TimesEight, offsetOfFirstProperty + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
+    load32(BaseIndex(regT0, regT2, TimesEight, offsetOfFirstProperty + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
+
+    done.link(this);
     emitValueProfilingSite();
     emitStore(dst, regT1, regT0);
 }
 
-void JIT::emitSlow_op_get_argument_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_get_direct_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    unsigned dst = currentInstruction[1].u.operand;
-    unsigned arguments = currentInstruction[2].u.operand;
-    unsigned property = currentInstruction[3].u.operand;
-
+    int base = currentInstruction[2].u.operand;
+    linkSlowCaseIfNotJSCell(iter, base);
     linkSlowCase(iter);
-    Jump skipArgumentsCreation = jump();
-    linkSlowCase(iter);
-    linkSlowCase(iter);
-    JITStubCall(this, cti_op_create_arguments).call();
-    emitStore(arguments, regT1, regT0);
-    emitStore(unmodifiedArgumentsRegister(arguments), regT1, regT0);
-
-    skipArgumentsCreation.link(this);
-    JITStubCall stubCall(this, cti_op_get_by_val_generic);
-    stubCall.addArgument(arguments);
-    stubCall.addArgument(property);
-    stubCall.callWithValueProfiling(dst);
+    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_direct_pname);
+    slowPathCall.call();
 }
 
-void JIT::emit_op_put_to_base(Instruction* currentInstruction)
+void JIT::emit_op_enumerator_structure_pname(Instruction* currentInstruction)
 {
-    int base = currentInstruction[1].u.operand;
-    int id = currentInstruction[2].u.operand;
-    int value = currentInstruction[3].u.operand;
+    int dst = currentInstruction[1].u.operand;
+    int enumerator = currentInstruction[2].u.operand;
+    int index = currentInstruction[3].u.operand;
 
-    PutToBaseOperation* operation = currentInstruction[4].u.putToBaseOperation;
+    emitLoadPayload(index, regT0);
+    emitLoadPayload(enumerator, regT1);
+    Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endStructurePropertyIndexOffset()));
+
+    move(TrustedImm32(JSValue::NullTag), regT2);
+    move(TrustedImm32(0), regT0);
+    Jump done = jump();
+    inBounds.link(this);
 
-    switch (operation->m_kind) {
-    case PutToBaseOperation::GlobalVariablePutChecked:
-        addSlowCase(branchTest8(NonZero, AbsoluteAddress(operation->m_predicatePointer)));
-    case PutToBaseOperation::GlobalVariablePut: {
-        JSGlobalObject* globalObject = m_codeBlock->globalObject();
-        if (operation->m_isDynamic)
-            addSlowCase(branchPtr(NotEqual, payloadFor(base), TrustedImmPtr(globalObject)));
+    loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1);
+    loadPtr(BaseIndex(regT1, regT0, timesPtr()), regT0);
+    move(TrustedImm32(JSValue::CellTag), regT2);
 
-        emitLoad(value, regT1, regT0);
-        storePtr(regT0, reinterpret_cast<char*>(operation->m_registerAddress) + OBJECT_OFFSETOF(JSValue, u.asBits.payload));
-        storePtr(regT1, reinterpret_cast<char*>(operation->m_registerAddress) + OBJECT_OFFSETOF(JSValue, u.asBits.tag));
-        if (Heap::isWriteBarrierEnabled())
-            emitWriteBarrier(globalObject, regT0, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
-        break;
-    }
-    case PutToBaseOperation::VariablePut: {
-        loadPtr(payloadFor(base), regT3);
-        emitLoad(value, regT1, regT0);
-        loadPtr(Address(regT3, JSVariableObject::offsetOfRegisters()), regT2);
-        store32(regT0, Address(regT2, operation->m_offset * sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
-        store32(regT1, Address(regT2, operation->m_offset * sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
-        if (Heap::isWriteBarrierEnabled())
-            emitWriteBarrier(regT3, regT1, regT0, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
-        break;
-    }
+    done.link(this);
+    emitStore(dst, regT2, regT0);
+}
 
-    case PutToBaseOperation::GlobalPropertyPut: {
-        JSGlobalObject* globalObject = m_codeBlock->globalObject();
-        loadPtr(payloadFor(base), regT3);
-        emitLoad(value, regT1, regT0);
-        loadPtr(&operation->m_structure, regT2);
-        addSlowCase(branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), regT2));
-        ASSERT(!operation->m_structure || !operation->m_structure->inlineCapacity());
-        loadPtr(Address(regT3, JSObject::butterflyOffset()), regT2);
-        load32(&operation->m_offsetInButterfly, regT3);
-        storePtr(regT0, BaseIndex(regT2, regT3, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
-        storePtr(regT1, BaseIndex(regT2, regT3, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
-        if (Heap::isWriteBarrierEnabled())
-            emitWriteBarrier(globalObject, regT1, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
-        break;
-    }
+void JIT::emit_op_enumerator_generic_pname(Instruction* currentInstruction)
+{
+    int dst = currentInstruction[1].u.operand;
+    int enumerator = currentInstruction[2].u.operand;
+    int index = currentInstruction[3].u.operand;
 
-    case PutToBaseOperation::Uninitialised:
-    case PutToBaseOperation::Readonly:
-    case PutToBaseOperation::Generic:
-        JITStubCall stubCall(this, cti_op_put_to_base);
-
-        stubCall.addArgument(TrustedImm32(base));
-        stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(id)));
-        stubCall.addArgument(TrustedImm32(value));
-        stubCall.addArgument(TrustedImmPtr(operation));
-        stubCall.call();
-        break;
+    emitLoadPayload(index, regT0);
+    emitLoadPayload(enumerator, regT1);
+    Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endGenericPropertyIndexOffset()));
+
+    move(TrustedImm32(JSValue::NullTag), regT2);
+    move(TrustedImm32(0), regT0);
+
+    Jump done = jump();
+    inBounds.link(this);
+
+    loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1);
+    loadPtr(BaseIndex(regT1, regT0, timesPtr()), regT0);
+    move(TrustedImm32(JSValue::CellTag), regT2);
+
+    done.link(this);
+    emitStore(dst, regT2, regT0);
+}
+
+void JIT::emit_op_profile_type(Instruction* currentInstruction)
+{
+    TypeLocation* cachedTypeLocation = currentInstruction[2].u.location;
+    int valueToProfile = currentInstruction[1].u.operand;
+
+    // Load payload in T0. Load tag in T3.
+    emitLoadPayload(valueToProfile, regT0);
+    emitLoadTag(valueToProfile, regT3);
+
+    JumpList jumpToEnd;
+
+    // Compile in a predictive type check, if possible, to see if we can skip writing to the log.
+    // These typechecks are inlined to match those of the 32-bit JSValue type checks.
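    // A note on the tag checks that follow (annotation, not part of this patch): in the
    // 32-bit value representation the tag word distinguishes the cases tested here, and
    // any tag that compares Below JSValue::LowestTag (an unsigned compare) denotes a
    // double. That is why the TypeNumber prediction needs two branches, roughly:
    //
    //     bool isNumber = (tag < JSValue::LowestTag) || (tag == JSValue::Int32Tag);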
+    if (cachedTypeLocation->m_lastSeenType == TypeUndefined)
+        jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::UndefinedTag)));
+    else if (cachedTypeLocation->m_lastSeenType == TypeNull)
+        jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::NullTag)));
+    else if (cachedTypeLocation->m_lastSeenType == TypeBoolean)
+        jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::BooleanTag)));
+    else if (cachedTypeLocation->m_lastSeenType == TypeMachineInt)
+        jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::Int32Tag)));
+    else if (cachedTypeLocation->m_lastSeenType == TypeNumber) {
+        jumpToEnd.append(branch32(Below, regT3, TrustedImm32(JSValue::LowestTag)));
+        jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::Int32Tag)));
+    } else if (cachedTypeLocation->m_lastSeenType == TypeString) {
+        Jump isNotCell = branch32(NotEqual, regT3, TrustedImm32(JSValue::CellTag));
+        jumpToEnd.append(branch8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType)));
+        isNotCell.link(this);
     }
+
+    // Load the type profiling log into T2.
+    TypeProfilerLog* cachedTypeProfilerLog = m_vm->typeProfilerLog();
+    move(TrustedImmPtr(cachedTypeProfilerLog), regT2);
+
+    // Load the next log entry into T1.
+    loadPtr(Address(regT2, TypeProfilerLog::currentLogEntryOffset()), regT1);
+
+    // Store the JSValue onto the log entry.
+    store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+    store32(regT3, Address(regT1, TypeProfilerLog::LogEntry::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+
+    // Store the structureID of the cell if argument is a cell, otherwise, store 0 on the log entry.
+    Jump notCell = branch32(NotEqual, regT3, TrustedImm32(JSValue::CellTag));
+    load32(Address(regT0, JSCell::structureIDOffset()), regT0);
+    store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset()));
+    Jump skipNotCell = jump();
+    notCell.link(this);
+    store32(TrustedImm32(0), Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset()));
+    skipNotCell.link(this);
+
+    // Store the typeLocation on the log entry.
+    move(TrustedImmPtr(cachedTypeLocation), regT0);
+    store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::locationOffset()));
+
+    // Increment the current log entry.
+    addPtr(TrustedImm32(sizeof(TypeProfilerLog::LogEntry)), regT1);
+    store32(regT1, Address(regT2, TypeProfilerLog::currentLogEntryOffset()));
+    jumpToEnd.append(branchPtr(NotEqual, regT1, TrustedImmPtr(cachedTypeProfilerLog->logEndPtr())));
+    // Clear the log if we're at the end of the log.
+    callOperation(operationProcessTypeProfilerLog);
+
+    jumpToEnd.link(this);
 }
 
 } // namespace JSC
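For reference, the op_profile_type sequence above behaves like a ring-buffer append followed by a flush when the buffer fills. A minimal C++ sketch of that behaviour, assuming a simplified entry with the value/structureID/location fields used above (the type and member names here are illustrative, not JavaScriptCore API):

#include <cstdint>
#include <cstddef>

// Illustrative stand-in for the type profiler log the JIT code writes to.
struct LogEntry { uint64_t encodedValue; uint32_t structureID; const void* location; };

struct TypeLog {
    static const size_t capacity = 4096;
    LogEntry entries[capacity];
    LogEntry* current = entries;           // next free slot (currentLogEntryOffset())
    LogEntry* end = entries + capacity;    // one past the last slot (logEndPtr())

    void append(uint64_t value, uint32_t structureID, const void* location) {
        current->encodedValue = value;      // tag + payload, as the two store32s above
        current->structureID = structureID; // 0 when the profiled value is not a cell
        current->location = location;       // the TypeLocation for this bytecode
        ++current;                          // addPtr(sizeof(LogEntry), regT1) above
        if (current == end)
            process();                      // operationProcessTypeProfilerLog in the JIT
    }

    void process() { current = entries; }   // the real operation also digests the entries
};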