/*
- * Copyright (C) 2010, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
#include "ThunkGenerators.h"
#include "CodeBlock.h"
-#include "Operations.h"
+#include "DFGSpeculativeJIT.h"
+#include "JITOperations.h"
+#include "JSArray.h"
+#include "JSArrayIterator.h"
+#include "JSStack.h"
+#include "MaxFrameExtentForSlowPathCall.h"
+#include "JSCInlines.h"
#include "SpecializedThunkJIT.h"
#include <wtf/InlineASM.h>
#include <wtf/StringPrintStream.h>
namespace JSC {
-static JSInterfaceJIT::Call generateSlowCaseFor(VM* vm, JSInterfaceJIT& jit)
+inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR)
{
- jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT2);
- jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT2, JSInterfaceJIT::regT2);
- jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT2, JSStack::ScopeChain);
-
- // Also initialize ReturnPC and CodeBlock, like a JS function would.
- jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3);
- jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);
- jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
+ if (ASSERT_DISABLED)
+ return;
+ CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR);
+ jit.abortWithReason(TGInvalidPointer);
+ isNonZero.link(&jit);
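+ // Probe the pointer by loading a byte through it (saving and restoring the register),
+ // so that a bogus pointer faults here, close to its source.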
+ jit.pushToSave(pointerGPR);
+ jit.load8(pointerGPR, pointerGPR);
+ jit.popToRestore(pointerGPR);
+}
- jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
- jit.restoreArgumentReference();
- JSInterfaceJIT::Call callNotJSFunction = jit.call();
- jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::callFrameRegister);
- jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
- jit.ret();
+// We will jump here if the JIT code tries to make a call, but the
+// linking helper (C++ code) decides to throw an exception instead.
+MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM* vm)
+{
+ CCallHelpers jit(vm);
- return callNotJSFunction;
+ // The call pushed a return address, so we need to pop it back off to re-align the stack,
+ // even though we won't use it.
+ jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);
+
+ jit.setupArguments(CCallHelpers::TrustedImmPtr(vm), GPRInfo::callFrameRegister);
+ jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(lookupExceptionHandler)), GPRInfo::nonArgGPR0);
+ emitPointerValidation(jit, GPRInfo::nonArgGPR0);
+ jit.call(GPRInfo::nonArgGPR0);
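+ // The handler lookup is expected to record the target frame and code location in the
+ // VM; jumpToExceptionHandler() then transfers control there.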
+ jit.jumpToExceptionHandler();
+
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
+ return FINALIZE_CODE(patchBuffer, ("Throw exception from call slow path thunk"));
}
-static MacroAssemblerCodeRef linkForGenerator(VM* vm, FunctionPtr lazyLink, FunctionPtr notJSFunction, const char* name)
+static void slowPathFor(
+ CCallHelpers& jit, VM* vm, P_JITOperation_ECli slowPathFunction)
{
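+ // Common slow path for the call thunks: build a frame, call the given JIT operation
+ // with the CallLinkInfo in regT2, then jump to the code pointer it returns.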
- JSInterfaceJIT jit;
-
- JSInterfaceJIT::JumpList slowCase;
+ jit.emitFunctionPrologue();
+ jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
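+ // Reserve whatever outgoing argument space a slow path C call needs on this target;
+ // maxFrameExtentForSlowPathCall is zero on targets that need none.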
+ if (maxFrameExtentForSlowPathCall)
+ jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
+ jit.setupArgumentsWithExecState(GPRInfo::regT2);
+ jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
+ emitPointerValidation(jit, GPRInfo::nonArgGPR0);
+ jit.call(GPRInfo::nonArgGPR0);
+ if (maxFrameExtentForSlowPathCall)
+ jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
-#if USE(JSVALUE64)
- slowCase.append(jit.emitJumpIfNotJSCell(JSInterfaceJIT::regT0));
- slowCase.append(jit.emitJumpIfNotType(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1, JSFunctionType));
-#else // USE(JSVALUE64)
- slowCase.append(jit.branch32(JSInterfaceJIT::NotEqual, JSInterfaceJIT::regT1, JSInterfaceJIT::TrustedImm32(JSValue::CellTag)));
- slowCase.append(jit.emitJumpIfNotType(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1, JSFunctionType));
-#endif // USE(JSVALUE64)
-
- // Finish canonical initialization before JS function call.
- jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSFunction::offsetOfScopeChain()), JSInterfaceJIT::regT1);
- jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
+ // This slow call will return the address of one of the following:
+ // 1) Exception throwing thunk.
+ // 2) Host call return value returner thingy.
+ // 3) The function to call.
+ emitPointerValidation(jit, GPRInfo::returnValueGPR);
+ jit.emitFunctionEpilogue();
+ jit.jump(GPRInfo::returnValueGPR);
+}
- // Also initialize ReturnPC for use by lazy linking and exceptions.
- jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3);
- jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);
+static MacroAssemblerCodeRef linkForThunkGenerator(
+ VM* vm, CodeSpecializationKind kind, RegisterPreservationMode registers)
+{
+ // The return address is on the stack or in the link register. We will hence
+ // save the return address to the call frame while we make a C++ function call
+ // to perform linking and lazy compilation if necessary. We expect the callee
+ // to be in regT0/regT1 (payload/tag), the CallFrame to have already
+ // been adjusted, and all other registers to be available for use.
- jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
- jit.restoreArgumentReference();
- JSInterfaceJIT::Call callLazyLink = jit.call();
- jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
- jit.jump(JSInterfaceJIT::regT0);
+ CCallHelpers jit(vm);
- slowCase.link(&jit);
- JSInterfaceJIT::Call callNotJSFunction = generateSlowCaseFor(vm, jit);
+ slowPathFor(jit, vm, operationLinkFor(kind, registers));
- LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
- patchBuffer.link(callLazyLink, lazyLink);
- patchBuffer.link(callNotJSFunction, notJSFunction);
-
- return FINALIZE_CODE(patchBuffer, ("link %s trampoline", name));
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
+ return FINALIZE_CODE(
+ patchBuffer,
+ ("Link %s%s slow path thunk", kind == CodeForCall ? "call" : "construct", registers == MustPreserveRegisters ? " that preserves registers" : ""));
+}
+
+MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm)
+{
+ return linkForThunkGenerator(vm, CodeForCall, RegisterPreservationNotRequired);
}
-MacroAssemblerCodeRef linkCallGenerator(VM* vm)
+MacroAssemblerCodeRef linkConstructThunkGenerator(VM* vm)
{
- return linkForGenerator(vm, FunctionPtr(cti_vm_lazyLinkCall), FunctionPtr(cti_op_call_NotJSFunction), "call");
+ return linkForThunkGenerator(vm, CodeForConstruct, RegisterPreservationNotRequired);
}
-MacroAssemblerCodeRef linkConstructGenerator(VM* vm)
+MacroAssemblerCodeRef linkCallThatPreservesRegsThunkGenerator(VM* vm)
{
- return linkForGenerator(vm, FunctionPtr(cti_vm_lazyLinkConstruct), FunctionPtr(cti_op_construct_NotJSConstruct), "construct");
+ return linkForThunkGenerator(vm, CodeForCall, MustPreserveRegisters);
}
-MacroAssemblerCodeRef linkClosureCallGenerator(VM* vm)
+MacroAssemblerCodeRef linkConstructThatPreservesRegsThunkGenerator(VM* vm)
{
- return linkForGenerator(vm, FunctionPtr(cti_vm_lazyLinkClosureCall), FunctionPtr(cti_op_call_NotJSFunction), "closure call");
+ return linkForThunkGenerator(vm, CodeForConstruct, MustPreserveRegisters);
}
-static MacroAssemblerCodeRef virtualForGenerator(VM* vm, FunctionPtr compile, FunctionPtr notJSFunction, const char* name, CodeSpecializationKind kind)
+static MacroAssemblerCodeRef linkClosureCallForThunkGenerator(
+ VM* vm, RegisterPreservationMode registers)
{
- JSInterfaceJIT jit;
+ CCallHelpers jit(vm);
+
+ slowPathFor(jit, vm, operationLinkClosureCallFor(registers));
- JSInterfaceJIT::JumpList slowCase;
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
+ return FINALIZE_CODE(patchBuffer, ("Link closure call %s slow path thunk", registers == MustPreserveRegisters ? " that preserves registers" : ""));
+}
-#if USE(JSVALUE64)
- slowCase.append(jit.emitJumpIfNotJSCell(JSInterfaceJIT::regT0));
-#else // USE(JSVALUE64)
- slowCase.append(jit.branch32(JSInterfaceJIT::NotEqual, JSInterfaceJIT::regT1, JSInterfaceJIT::TrustedImm32(JSValue::CellTag)));
-#endif // USE(JSVALUE64)
- slowCase.append(jit.emitJumpIfNotType(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1, JSFunctionType));
+// For closure optimizations, we only include calls, since if you're using closures for
+// object construction then you're going to lose big time anyway.
+MacroAssemblerCodeRef linkClosureCallThunkGenerator(VM* vm)
+{
+ return linkClosureCallForThunkGenerator(vm, RegisterPreservationNotRequired);
+}
- // Finish canonical initialization before JS function call.
- jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSFunction::offsetOfScopeChain()), JSInterfaceJIT::regT1);
- jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
+MacroAssemblerCodeRef linkClosureCallThatPreservesRegsThunkGenerator(VM* vm)
+{
+ return linkClosureCallForThunkGenerator(vm, MustPreserveRegisters);
+}
- jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
- JSInterfaceJIT::Jump hasCodeBlock1 = jit.branch32(JSInterfaceJIT::GreaterThanOrEqual, JSInterfaceJIT::Address(JSInterfaceJIT::regT2, FunctionExecutable::offsetOfNumParametersFor(kind)), JSInterfaceJIT::TrustedImm32(0));
- jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3);
- jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
- jit.restoreArgumentReference();
- JSInterfaceJIT::Call callCompile = jit.call();
- jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
- jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
+static MacroAssemblerCodeRef virtualForThunkGenerator(
+ VM* vm, CodeSpecializationKind kind, RegisterPreservationMode registers)
+{
+ // The callee is in regT0 (for JSVALUE32_64, the tag is in regT1).
+ // The return address is on the stack, or in the link register. We will hence
+ // jump to the callee, or save the return address to the call frame while we
+ // make a C++ function call to the appropriate JIT operation.
- hasCodeBlock1.link(&jit);
- jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, FunctionExecutable::offsetOfJITCodeWithArityCheckFor(kind)), JSInterfaceJIT::regT0);
- jit.jump(JSInterfaceJIT::regT0);
+ CCallHelpers jit(vm);
+
+ CCallHelpers::JumpList slowCase;
+
+ // This is a slow path execution, and regT2 contains the CallLinkInfo. Count the
+ // slow path execution for the profiler.
+ jit.add32(
+ CCallHelpers::TrustedImm32(1),
+ CCallHelpers::Address(GPRInfo::regT2, OBJECT_OFFSETOF(CallLinkInfo, slowPathCount)));
+
+ // FIXME: we should have a story for eliminating these checks. In many cases,
+ // the DFG knows that the value is definitely a cell, or definitely a function.
+
+#if USE(JSVALUE64)
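+ // In the 64-bit value encoding, a JSValue is a cell pointer exactly when none of the
+ // tag bits (TagMask) are set, so a single mask test rejects all non-cells.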
+ jit.move(CCallHelpers::TrustedImm64(TagMask), GPRInfo::regT4);
+
+ slowCase.append(
+ jit.branchTest64(
+ CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::regT4));
+#else
+ slowCase.append(
+ jit.branch32(
+ CCallHelpers::NotEqual, GPRInfo::regT1,
+ CCallHelpers::TrustedImm32(JSValue::CellTag)));
+#endif
+ AssemblyHelpers::emitLoadStructure(jit, GPRInfo::regT0, GPRInfo::regT4, GPRInfo::regT1);
+ slowCase.append(
+ jit.branchPtr(
+ CCallHelpers::NotEqual,
+ CCallHelpers::Address(GPRInfo::regT4, Structure::classInfoOffset()),
+ CCallHelpers::TrustedImmPtr(JSFunction::info())));
+
+ // Now we know we have a JSFunction.
+
+ jit.loadPtr(
+ CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()),
+ GPRInfo::regT4);
+ jit.loadPtr(
+ CCallHelpers::Address(
+ GPRInfo::regT4, ExecutableBase::offsetOfJITCodeWithArityCheckFor(kind, registers)),
+ GPRInfo::regT4);
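+ // A null entry point means there is no JIT code for this specialization yet; take the
+ // slow path below, which can compile and link it.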
+ slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT4));
+
+ // Now we know that we have a CodeBlock, and we're committed to making a fast
+ // call.
+
+ jit.loadPtr(
+ CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfScopeChain()),
+ GPRInfo::regT1);
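+ // Note that the callee's prologue has not run yet, hence the "before prologue"
+ // variants of the call frame header stores below.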
+#if USE(JSVALUE64)
+ jit.emitPutToCallFrameHeaderBeforePrologue(GPRInfo::regT1, JSStack::ScopeChain);
+#else
+ jit.emitPutPayloadToCallFrameHeaderBeforePrologue(GPRInfo::regT1, JSStack::ScopeChain);
+ jit.emitPutTagToCallFrameHeaderBeforePrologue(CCallHelpers::TrustedImm32(JSValue::CellTag),
+ JSStack::ScopeChain);
+#endif
+ // Make a tail call. This will return back to JIT code.
+ emitPointerValidation(jit, GPRInfo::regT4);
+ jit.jump(GPRInfo::regT4);
+
slowCase.link(&jit);
- JSInterfaceJIT::Call callNotJSFunction = generateSlowCaseFor(vm, jit);
- LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
- patchBuffer.link(callCompile, compile);
- patchBuffer.link(callNotJSFunction, notJSFunction);
+ // Here we don't know anything, so revert to the full slow path.
+
+ slowPathFor(jit, vm, operationVirtualFor(kind, registers));
- return FINALIZE_CODE(patchBuffer, ("virtual %s trampoline", name));
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
+ return FINALIZE_CODE(
+ patchBuffer,
+ ("Virtual %s%s slow path thunk", kind == CodeForCall ? "call" : "construct", registers == MustPreserveRegisters ? " that preserves registers" : ""));
}
-MacroAssemblerCodeRef virtualCallGenerator(VM* vm)
+MacroAssemblerCodeRef virtualCallThunkGenerator(VM* vm)
{
- return virtualForGenerator(vm, FunctionPtr(cti_op_call_jitCompile), FunctionPtr(cti_op_call_NotJSFunction), "call", CodeForCall);
+ return virtualForThunkGenerator(vm, CodeForCall, RegisterPreservationNotRequired);
}
-MacroAssemblerCodeRef virtualConstructGenerator(VM* vm)
+MacroAssemblerCodeRef virtualConstructThunkGenerator(VM* vm)
{
- return virtualForGenerator(vm, FunctionPtr(cti_op_construct_jitCompile), FunctionPtr(cti_op_construct_NotJSConstruct), "construct", CodeForConstruct);
+ return virtualForThunkGenerator(vm, CodeForConstruct, RegisterPreservationNotRequired);
}
-MacroAssemblerCodeRef stringLengthTrampolineGenerator(VM* vm)
+MacroAssemblerCodeRef virtualCallThatPreservesRegsThunkGenerator(VM* vm)
{
- JSInterfaceJIT jit;
-
-#if USE(JSVALUE64)
- // Check eax is a string
- JSInterfaceJIT::Jump failureCases1 = jit.emitJumpIfNotJSCell(JSInterfaceJIT::regT0);
- JSInterfaceJIT::Jump failureCases2 = jit.branchPtr(
- JSInterfaceJIT::NotEqual, JSInterfaceJIT::Address(
- JSInterfaceJIT::regT0, JSCell::structureOffset()),
- JSInterfaceJIT::TrustedImmPtr(vm->stringStructure.get()));
-
- // Checks out okay! - get the length from the Ustring.
- jit.load32(
- JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSString::offsetOfLength()),
- JSInterfaceJIT::regT0);
-
- JSInterfaceJIT::Jump failureCases3 = jit.branch32(
- JSInterfaceJIT::LessThan, JSInterfaceJIT::regT0, JSInterfaceJIT::TrustedImm32(0));
-
- // regT0 contains a 64 bit value (is positive, is zero extended) so we don't need sign extend here.
- jit.emitFastArithIntToImmNoCheck(JSInterfaceJIT::regT0, JSInterfaceJIT::regT0);
-
-#else // USE(JSVALUE64)
- // regT0 holds payload, regT1 holds tag
-
- JSInterfaceJIT::Jump failureCases1 = jit.branch32(
- JSInterfaceJIT::NotEqual, JSInterfaceJIT::regT1,
- JSInterfaceJIT::TrustedImm32(JSValue::CellTag));
- JSInterfaceJIT::Jump failureCases2 = jit.branchPtr(
- JSInterfaceJIT::NotEqual,
- JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSCell::structureOffset()),
- JSInterfaceJIT::TrustedImmPtr(vm->stringStructure.get()));
-
- // Checks out okay! - get the length from the Ustring.
- jit.load32(
- JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSString::offsetOfLength()),
- JSInterfaceJIT::regT2);
-
- JSInterfaceJIT::Jump failureCases3 = jit.branch32(
- JSInterfaceJIT::Above, JSInterfaceJIT::regT2, JSInterfaceJIT::TrustedImm32(INT_MAX));
- jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::regT0);
- jit.move(JSInterfaceJIT::TrustedImm32(JSValue::Int32Tag), JSInterfaceJIT::regT1);
-#endif // USE(JSVALUE64)
+ return virtualForThunkGenerator(vm, CodeForCall, MustPreserveRegisters);
+}
- jit.ret();
-
- JSInterfaceJIT::Call failureCases1Call = jit.makeTailRecursiveCall(failureCases1);
- JSInterfaceJIT::Call failureCases2Call = jit.makeTailRecursiveCall(failureCases2);
- JSInterfaceJIT::Call failureCases3Call = jit.makeTailRecursiveCall(failureCases3);
-
- LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
-
- patchBuffer.link(failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
- patchBuffer.link(failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
- patchBuffer.link(failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
-
- return FINALIZE_CODE(patchBuffer, ("string length trampoline"));
+MacroAssemblerCodeRef virtualConstructThatPreservesRegsThunkGenerator(VM* vm)
+{
+ return virtualForThunkGenerator(vm, CodeForConstruct, MustPreserveRegisters);
}
-static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind)
+enum ThunkEntryType { EnterViaCall, EnterViaJump };
+
+static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind, ThunkEntryType entryType = EnterViaCall)
{
int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);
- JSInterfaceJIT jit;
-
+ JSInterfaceJIT jit(vm);
+
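+ // Only emit a prologue when entered via a call; the EnterViaJump entry point is
+ // reached by a tail jump from a thunk that has already set up the frame.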
+ if (entryType == EnterViaCall)
+ jit.emitFunctionPrologue();
+
jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
#if CPU(X86)
// Load caller frame's scope chain into this callframe so that whatever we call can
// get to its global data.
- jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT0);
+ jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT0);
jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
- jit.peek(JSInterfaceJIT::regT1);
- jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ReturnPC);
-
// Calling convention: f(ecx, edx, ...);
// Host function signature: f(ExecState*);
jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);
- jit.subPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(void*)), JSInterfaceJIT::stackPointerRegister); // Align stack after call.
+ jit.subPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister); // Align stack after prologue.
// call the function
jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT1);
jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
- jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction));
- jit.addPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(void*)), JSInterfaceJIT::stackPointerRegister);
+ jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister);
#elif CPU(X86_64)
// Load caller frame's scope chain into this callframe so that whatever we call can
// get to its global data.
- jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT0);
+ jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT0);
jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
-
- jit.peek(JSInterfaceJIT::regT1);
- jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ReturnPC);
-
#if !OS(WINDOWS)
// Calling convention: f(edi, esi, edx, ecx, ...);
// Host function signature: f(ExecState*);
jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi);
- jit.subPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); // Align stack after call.
-
jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::esi);
jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9);
- jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));
- jit.addPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#else
// Calling convention: f(ecx, edx, r8, r9, ...);
// Host function signature: f(ExecState*);
jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);
- // Leave space for the callee parameter home addresses and align the stack.
- jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
+ // Leave space for the callee parameter home addresses.
+ // At this point the stack is aligned to 16 bytes, but if this changes at some point, we need to emit code to align it.
+ jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::edx);
jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
- jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));
- jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
+ jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif
#elif CPU(ARM64)
// Load caller frame's scope chain into this callframe so that whatever we call can
// get to its global data.
- jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, ARM64Registers::x3);
+ jit.emitGetCallerFrameFromCallFrameHeaderPtr(ARM64Registers::x3);
jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, ARM64Registers::x3);
jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
- jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
- jit.emitPutToCallFrameHeader(ARM64Registers::lr, JSStack::ReturnPC);
-
- // Calling convention: f(edi, esi, edx, ecx, ...);
// Host function signature: f(ExecState*);
jit.move(JSInterfaceJIT::callFrameRegister, ARM64Registers::x0);
jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARM64Registers::x1);
jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, JSFunction::offsetOfExecutable()), ARM64Registers::x2);
- jit.move(ARM64Registers::x3, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
jit.call(JSInterfaceJIT::Address(ARM64Registers::x2, executableOffsetToFunction));
-
- jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
-
-#elif CPU(ARM)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT2);
+#elif CPU(ARM) || CPU(SH4) || CPU(MIPS)
+ // Load caller frame's scope chain into this callframe so that whatever we call can get to its global data.
+ jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT2);
jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
- jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
- jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);
-
- // Calling convention: f(r0 == regT0, r1 == regT1, ...);
- // Host function signature: f(ExecState*);
- jit.move(JSInterfaceJIT::callFrameRegister, ARMRegisters::r0);
-
- jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARMRegisters::r1);
- jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
- jit.loadPtr(JSInterfaceJIT::Address(ARMRegisters::r1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
- jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));
-
- jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
-
-#elif CPU(SH4)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT2);
- jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
- jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
-
- jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
- jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);
-
- // Calling convention: f(r0 == regT4, r1 == regT5, ...);
- // Host function signature: f(ExecState*);
- jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT4);
-
- jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT5);
- jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
- jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT5, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
-
- jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction), JSInterfaceJIT::regT0);
- jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
-
-#elif CPU(MIPS)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT0);
- jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
- jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
-
- jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
- jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);
-
- // Calling convention: f(a0, a1, a2, a3);
- // Host function signature: f(ExecState*);
-
- // Allocate stack space for 16 bytes (8-byte aligned)
- // 16 bytes (unused) for 4 arguments
+#if CPU(MIPS)
+ // Allocate 16 bytes (8-byte aligned) of stack space for the 4 (unused) argument slots.
jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
+#endif
- // Setup arg0
- jit.move(JSInterfaceJIT::callFrameRegister, MIPSRegisters::a0);
+ // Calling convention is f(argumentGPR0, argumentGPR1, ...).
+ // Host function signature is f(ExecState*).
+ jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);
- // Call
- jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, MIPSRegisters::a2);
- jit.loadPtr(JSInterfaceJIT::Address(MIPSRegisters::a2, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
- jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::argumentGPR1);
+ jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));
+#if CPU(MIPS)
// Restore stack space
jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
-
- jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
+#endif
#else
#error "JIT not supported on this platform."
UNUSED_PARAM(executableOffsetToFunction);
- breakpoint();
+ abortWithReason(TGNotSupported);
#endif
// Check for an exception
#if USE(JSVALUE64)
- jit.load64(&(vm->exception), JSInterfaceJIT::regT2);
+ jit.load64(vm->addressOfException(), JSInterfaceJIT::regT2);
JSInterfaceJIT::Jump exceptionHandler = jit.branchTest64(JSInterfaceJIT::NonZero, JSInterfaceJIT::regT2);
#else
JSInterfaceJIT::Jump exceptionHandler = jit.branch32(
JSInterfaceJIT::NotEqual,
- JSInterfaceJIT::AbsoluteAddress(reinterpret_cast<char*>(&vm->exception) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
+ JSInterfaceJIT::AbsoluteAddress(reinterpret_cast<char*>(vm->addressOfException()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
JSInterfaceJIT::TrustedImm32(JSValue::EmptyValueTag));
#endif
+ jit.emitFunctionEpilogue();
// Return.
jit.ret();
// Handle an exception
exceptionHandler.link(&jit);
- // Grab the return address.
- jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT1);
-
- jit.move(JSInterfaceJIT::TrustedImmPtr(&vm->exceptionLocation), JSInterfaceJIT::regT2);
- jit.storePtr(JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
- jit.poke(JSInterfaceJIT::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
-
jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
- // Set the return address.
- jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), JSInterfaceJIT::regT1);
- jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT1);
- jit.ret();
+#if CPU(X86) && USE(JSVALUE32_64)
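+ // Pass the argument on the stack; the 12-byte adjustment plus the 4-byte push keeps
+ // the stack 16-byte aligned for the call below.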
+ jit.addPtr(JSInterfaceJIT::TrustedImm32(-12), JSInterfaceJIT::stackPointerRegister);
+ jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister), JSInterfaceJIT::regT0);
+ jit.push(JSInterfaceJIT::regT0);
+#else
+#if OS(WINDOWS)
+ // Allocate space on stack for the 4 parameter registers.
+ jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
+#endif
+ jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister), JSInterfaceJIT::argumentGPR0);
+#endif
+ jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(operationVMHandleException).value()), JSInterfaceJIT::regT3);
+ jit.call(JSInterfaceJIT::regT3);
+#if CPU(X86) && USE(JSVALUE32_64)
+ jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
+#elif OS(WINDOWS)
+ jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
+#endif
- LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
- return FINALIZE_CODE(patchBuffer, ("native %s trampoline", toCString(kind).data()));
+ jit.jumpToExceptionHandler();
+
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
+ return FINALIZE_CODE(patchBuffer, ("native %s%s trampoline", entryType == EnterViaJump ? "Tail " : "", toCString(kind).data()));
}
MacroAssemblerCodeRef nativeCallGenerator(VM* vm)
{
return nativeForGenerator(vm, CodeForCall);
}
+MacroAssemblerCodeRef nativeTailCallGenerator(VM* vm)
+{
+ return nativeForGenerator(vm, CodeForCall, EnterViaJump);
+}
+
MacroAssemblerCodeRef nativeConstructGenerator(VM* vm)
{
return nativeForGenerator(vm, CodeForConstruct);
}
+MacroAssemblerCodeRef arityFixup(VM* vm)
+{
+ JSInterfaceJIT jit(vm);
+
+ // We enter with the fixup count, in aligned stack units, in regT0, and with the return
+ // thunk in regT5 on 32-bit and regT7 on 64-bit.
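+ //
+ // The frame (header plus arguments) is copied down by the fixup amount, the newly
+ // exposed argument slots are filled with undefined, and the frame's return PC is
+ // redirected through the supplied return thunk after stashing the original return PC.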
+#if USE(JSVALUE64)
+# if CPU(X86_64)
+ jit.pop(JSInterfaceJIT::regT4);
+# endif
+ jit.lshift32(JSInterfaceJIT::TrustedImm32(logStackAlignmentRegisters()), JSInterfaceJIT::regT0);
+ jit.neg64(JSInterfaceJIT::regT0);
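+ // regT0 now holds the negative slot count, so it can be used directly as the scaled
+ // index while copying the frame toward lower addresses.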
+ jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT6);
+ jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::regT2);
+ jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2);
+
+ // Move the current frame down by regT0 slots
+ JSInterfaceJIT::Label copyLoop(jit.label());
+ jit.load64(JSInterfaceJIT::regT6, JSInterfaceJIT::regT1);
+ jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
+ jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT6);
+ jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);
+
+ // Fill in regT0 - 1 missing arg slots with undefined
+ jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2);
+ jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), JSInterfaceJIT::regT1);
+ jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2);
+ JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
+ jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
+ jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT6);
+ jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit);
+
+ // Adjust call frame register and stack pointer to account for missing args
+ jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1);
+ jit.lshift64(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT1);
+ jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::callFrameRegister);
+ jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::stackPointerRegister);
+
+ // Save the original return PC.
+ jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT1);
+ jit.storePtr(GPRInfo::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
+
+ // Install the new return PC.
+ jit.storePtr(GPRInfo::regT7, JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()));
+
+# if CPU(X86_64)
+ jit.push(JSInterfaceJIT::regT4);
+# endif
+ jit.ret();
+#else
+# if CPU(X86)
+ jit.pop(JSInterfaceJIT::regT4);
+# endif
+ jit.lshift32(JSInterfaceJIT::TrustedImm32(logStackAlignmentRegisters()), JSInterfaceJIT::regT0);
+ jit.neg32(JSInterfaceJIT::regT0);
+ jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
+ jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::regT2);
+ jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2);
+
+ // Move the current frame down by regT0 slots
+ JSInterfaceJIT::Label copyLoop(jit.label());
+ jit.load32(JSInterfaceJIT::regT3, JSInterfaceJIT::regT1);
+ jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
+ jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, 4), JSInterfaceJIT::regT1);
+ jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight, 4));
+ jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
+ jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);
+
+ // Fill in regT0 - 1 missing arg slots with undefined
+ jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2);
+ jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2);
+ JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
+ jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT1);
+ jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
+ jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT1);
+ jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight, 4));
+
+ jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
+ jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit);
+
+ // Adjust call frame register and stack pointer to account for missing args
+ jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1);
+ jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT1);
+ jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::callFrameRegister);
+ jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::stackPointerRegister);
+
+ // Save the original return PC.
+ jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT1);
+ jit.storePtr(GPRInfo::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
+
+ // Install the new return PC.
+ jit.storePtr(GPRInfo::regT5, JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()));
+
+# if CPU(X86)
+ jit.push(JSInterfaceJIT::regT4);
+# endif
+ jit.ret();
+#endif
+
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
+ return FINALIZE_CODE(patchBuffer, ("fixup arity"));
+}
+
static void stringCharLoad(SpecializedThunkJIT& jit, VM* vm)
{
// load string
MacroAssemblerCodeRef charCodeAtThunkGenerator(VM* vm)
{
- SpecializedThunkJIT jit(1);
+ SpecializedThunkJIT jit(vm, 1);
stringCharLoad(jit, vm);
jit.returnInt32(SpecializedThunkJIT::regT0);
- return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "charCodeAt");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charCodeAt");
}
MacroAssemblerCodeRef charAtThunkGenerator(VM* vm)
{
- SpecializedThunkJIT jit(1);
+ SpecializedThunkJIT jit(vm, 1);
stringCharLoad(jit, vm);
charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
jit.returnJSCell(SpecializedThunkJIT::regT0);
- return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "charAt");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charAt");
}
MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm)
{
- SpecializedThunkJIT jit(1);
+ SpecializedThunkJIT jit(vm, 1);
// load char code
jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
jit.returnJSCell(SpecializedThunkJIT::regT0);
- return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "fromCharCode");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "fromCharCode");
}
MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
{
- SpecializedThunkJIT jit(1);
+ SpecializedThunkJIT jit(vm, 1);
if (!jit.supportsFloatingPointSqrt())
return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "sqrt");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "sqrt");
}
}
-#if CPU(X86_64) && COMPILER(GCC) && (PLATFORM(MAC) || OS(LINUX))
+#if CPU(X86_64) && COMPILER(GCC) && (OS(DARWIN) || OS(LINUX))
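+// The pushq/popq pair below keeps the stack 16-byte aligned around the call into the C
+// math function; the pop into %rcx merely discards the padding word.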
#define defineUnaryDoubleOpWrapper(function) \
asm( \
".globl " SYMBOL_STRING(function##Thunk) "\n" \
HIDE_SYMBOL(function##Thunk) "\n" \
SYMBOL_STRING(function##Thunk) ":" "\n" \
+ "pushq %rax\n" \
"call " GLOBAL_REFERENCE(function) "\n" \
+ "popq %rcx\n" \
"ret\n" \
);\
extern "C" { \
} \
static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
-#elif CPU(X86) && COMPILER(GCC) && (PLATFORM(MAC) || OS(LINUX))
+#elif CPU(X86) && COMPILER(GCC) && (OS(DARWIN) || OS(LINUX))
#define defineUnaryDoubleOpWrapper(function) \
asm( \
".text\n" \
".globl " SYMBOL_STRING(function##Thunk) "\n" \
HIDE_SYMBOL(function##Thunk) "\n" \
SYMBOL_STRING(function##Thunk) ":" "\n" \
- "subl $8, %esp\n" \
+ "subl $20, %esp\n" \
"movsd %xmm0, (%esp) \n" \
"call " GLOBAL_REFERENCE(function) "\n" \
"fstpl (%esp) \n" \
"movsd (%esp), %xmm0 \n" \
- "addl $8, %esp\n" \
+ "addl $20, %esp\n" \
"ret\n" \
);\
extern "C" { \
MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
{
- SpecializedThunkJIT jit(1);
+ SpecializedThunkJIT jit(vm, 1);
MacroAssembler::Jump nonIntJump;
if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint())
return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
SpecializedThunkJIT::Jump intResult;
SpecializedThunkJIT::JumpList doubleResult;
if (jit.supportsFloatingPointTruncate()) {
- jit.loadDouble(&zeroConstant, SpecializedThunkJIT::fpRegT1);
+ jit.loadDouble(MacroAssembler::TrustedImmPtr(&zeroConstant), SpecializedThunkJIT::fpRegT1);
doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
SpecializedThunkJIT::JumpList slowPath;
// Handle the negative doubles in the slow path for now.
doubleResult.link(&jit);
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
#endif // CPU(ARM64)
- return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "floor");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");
}
MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
{
- SpecializedThunkJIT jit(1);
+ SpecializedThunkJIT jit(vm, 1);
if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint())
return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
MacroAssembler::Jump nonIntJump;
jit.returnInt32(SpecializedThunkJIT::regT0);
doubleResult.link(&jit);
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "ceil");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "ceil");
}
MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
{
- SpecializedThunkJIT jit(1);
+ SpecializedThunkJIT jit(vm, 1);
if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint())
return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
MacroAssembler::Jump nonIntJump;
SpecializedThunkJIT::Jump intResult;
SpecializedThunkJIT::JumpList doubleResult;
if (jit.supportsFloatingPointTruncate()) {
- jit.loadDouble(&zeroConstant, SpecializedThunkJIT::fpRegT1);
+ jit.loadDouble(MacroAssembler::TrustedImmPtr(&zeroConstant), SpecializedThunkJIT::fpRegT1);
doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
SpecializedThunkJIT::JumpList slowPath;
// Handle the negative doubles in the slow path for now.
slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
- jit.loadDouble(&halfConstant, SpecializedThunkJIT::fpRegT1);
+ jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), SpecializedThunkJIT::fpRegT1);
jit.addDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0));
intResult = jit.jump();
jit.returnInt32(SpecializedThunkJIT::regT0);
doubleResult.link(&jit);
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "round");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "round");
}
MacroAssemblerCodeRef expThunkGenerator(VM* vm)
{
if (!UnaryDoubleOpWrapper(exp))
return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
- SpecializedThunkJIT jit(1);
+ SpecializedThunkJIT jit(vm, 1);
if (!jit.supportsFloatingPoint())
return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp));
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "exp");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "exp");
}
MacroAssemblerCodeRef logThunkGenerator(VM* vm)
{
if (!UnaryDoubleOpWrapper(log))
return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
- SpecializedThunkJIT jit(1);
+ SpecializedThunkJIT jit(vm, 1);
if (!jit.supportsFloatingPoint())
return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "log");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "log");
}
MacroAssemblerCodeRef absThunkGenerator(VM* vm)
{
- SpecializedThunkJIT jit(1);
+ SpecializedThunkJIT jit(vm, 1);
if (!jit.supportsFloatingPointAbs())
return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
MacroAssembler::Jump nonIntJump;
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
jit.returnDouble(SpecializedThunkJIT::fpRegT1);
- return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "abs");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "abs");
}
MacroAssemblerCodeRef powThunkGenerator(VM* vm)
{
- SpecializedThunkJIT jit(2);
+ SpecializedThunkJIT jit(vm, 2);
if (!jit.supportsFloatingPoint())
return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
- jit.loadDouble(&oneConstant, SpecializedThunkJIT::fpRegT1);
+ jit.loadDouble(MacroAssembler::TrustedImmPtr(&oneConstant), SpecializedThunkJIT::fpRegT1);
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
MacroAssembler::Jump nonIntExponent;
jit.loadInt32Argument(1, SpecializedThunkJIT::regT0, nonIntExponent);
if (jit.supportsFloatingPointSqrt()) {
nonIntExponent.link(&jit);
- jit.loadDouble(&negativeHalfConstant, SpecializedThunkJIT::fpRegT3);
+ jit.loadDouble(MacroAssembler::TrustedImmPtr(&negativeHalfConstant), SpecializedThunkJIT::fpRegT3);
jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::regT0);
jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleLessThanOrEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT3));
} else
jit.appendFailure(nonIntExponent);
- return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "pow");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "pow");
}
MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
{
- SpecializedThunkJIT jit(2);
+ SpecializedThunkJIT jit(vm, 2);
MacroAssembler::Jump nonIntArg0Jump;
jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArg0Jump);
SpecializedThunkJIT::Label doneLoadingArg0(&jit);
nonIntArg0Jump.link(&jit);
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit);
- jit.xor32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0);
- jit.jump(doneLoadingArg0);
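+ // The double could not be truncated to an int32, so bail out to the generic
+ // implementation rather than treating the argument as zero.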
+ jit.appendFailure(jit.jump());
} else
jit.appendFailure(nonIntArg0Jump);
nonIntArg1Jump.link(&jit);
jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1);
jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit);
- jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT1);
- jit.jump(doneLoadingArg1);
+ jit.appendFailure(jit.jump());
} else
jit.appendFailure(nonIntArg1Jump);
- return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "imul");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "imul");
+}
+
+static MacroAssemblerCodeRef arrayIteratorNextThunkGenerator(VM* vm, ArrayIterationKind kind)
+{
+ typedef SpecializedThunkJIT::TrustedImm32 TrustedImm32;
+ typedef SpecializedThunkJIT::TrustedImmPtr TrustedImmPtr;
+ typedef SpecializedThunkJIT::Address Address;
+ typedef SpecializedThunkJIT::BaseIndex BaseIndex;
+ typedef SpecializedThunkJIT::Jump Jump;
+
+ SpecializedThunkJIT jit(vm);
+ // Make sure we're being called on an array iterator, and load m_iteratedObject and m_nextIndex into regT0 and regT1, respectively
+ jit.loadArgumentWithSpecificClass(JSArrayIterator::info(), SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT4, SpecializedThunkJIT::regT1);
+
+ // Early exit if we don't have a thunk for this form of iteration
+ jit.appendFailure(jit.branch32(SpecializedThunkJIT::AboveOrEqual, Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfIterationKind()), TrustedImm32(ArrayIterateKeyValue)));
+
+ jit.loadPtr(Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfIteratedObject()), SpecializedThunkJIT::regT0);
+
+ jit.load32(Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()), SpecializedThunkJIT::regT1);
+
+ // Pull out the butterfly from iteratedObject
+ jit.load8(Address(SpecializedThunkJIT::regT0, JSCell::indexingTypeOffset()), SpecializedThunkJIT::regT3);
+ jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);
+
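+ // Keep only the indexing-shape bits; the inline loads below dispatch on the
+ // contiguous/undecided, int32, and double shapes.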
+ jit.and32(TrustedImm32(IndexingShapeMask), SpecializedThunkJIT::regT3);
+
+ Jump notDone = jit.branch32(SpecializedThunkJIT::Below, SpecializedThunkJIT::regT1, Address(SpecializedThunkJIT::regT2, Butterfly::offsetOfPublicLength()));
+ // Return the termination signal to indicate that we've finished
+ jit.move(TrustedImmPtr(vm->iterationTerminator.get()), SpecializedThunkJIT::regT0);
+ jit.returnJSCell(SpecializedThunkJIT::regT0);
+
+ notDone.link(&jit);
+
+ if (kind == ArrayIterateKey) {
+ jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
+ jit.returnInt32(SpecializedThunkJIT::regT1);
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "array-iterator-next-key");
+
+ }
+ ASSERT(kind == ArrayIterateValue);
+
+ // Okay, now we're returning a value so make sure we're inside the vector size
+ jit.appendFailure(jit.branch32(SpecializedThunkJIT::AboveOrEqual, SpecializedThunkJIT::regT1, Address(SpecializedThunkJIT::regT2, Butterfly::offsetOfVectorLength())));
+
+ // So now we perform inline loads for int32, value/undecided, and double storage
+ Jump undecidedStorage = jit.branch32(SpecializedThunkJIT::Equal, SpecializedThunkJIT::regT3, TrustedImm32(UndecidedShape));
+ Jump notContiguousStorage = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(ContiguousShape));
+
+ undecidedStorage.link(&jit);
+
+ jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);
+
+#if USE(JSVALUE64)
+ jit.load64(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight), SpecializedThunkJIT::regT0);
+ Jump notHole = jit.branchTest64(SpecializedThunkJIT::NonZero, SpecializedThunkJIT::regT0);
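+ // An all-zero slot is a hole; substitute undefined rather than returning the raw
+ // empty value.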
+ jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), JSInterfaceJIT::regT0);
+ notHole.link(&jit);
+ jit.addPtr(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
+ jit.returnJSValue(SpecializedThunkJIT::regT0);
+#else
+ jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfTag()), SpecializedThunkJIT::regT3);
+ Jump notHole = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(JSValue::EmptyValueTag));
+ jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT1);
+ jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT0);
+ jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
+ jit.returnJSValue(SpecializedThunkJIT::regT0, JSInterfaceJIT::regT1);
+ notHole.link(&jit);
+ jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfPayload()), SpecializedThunkJIT::regT0);
+ jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
+ jit.move(SpecializedThunkJIT::regT3, SpecializedThunkJIT::regT1);
+ jit.returnJSValue(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
+#endif
+ notContiguousStorage.link(&jit);
+
+ Jump notInt32Storage = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(Int32Shape));
+ jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);
+ jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfPayload()), SpecializedThunkJIT::regT0);
+ jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
+ jit.returnInt32(SpecializedThunkJIT::regT0);
+ notInt32Storage.link(&jit);
+
+ jit.appendFailure(jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(DoubleShape)));
+ jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);
+ jit.loadDouble(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight), SpecializedThunkJIT::fpRegT0);
+ jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
+ jit.returnDouble(SpecializedThunkJIT::fpRegT0);
+
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "array-iterator-next-value");
+}
+
+MacroAssemblerCodeRef arrayIteratorNextKeyThunkGenerator(VM* vm)
+{
+ return arrayIteratorNextThunkGenerator(vm, ArrayIterateKey);
}
+MacroAssemblerCodeRef arrayIteratorNextValueThunkGenerator(VM* vm)
+{
+ return arrayIteratorNextThunkGenerator(vm, ArrayIterateValue);
+}
+
}
#endif // ENABLE(JIT)