/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
#include "CodeBlock.h"
#include "CompactJITCodeMap.h"
#include "Interpreter.h"
+#include "JITDisassembler.h"
#include "JSInterfaceJIT.h"
+#include "LegacyProfiler.h"
#include "Opcode.h"
-#include "Profiler.h"
+#include "ResultType.h"
+#include "UnusedPointer.h"
#include <bytecode/SamplingTool.h>
namespace JSC {
class JIT;
class JSPropertyNameIterator;
class Interpreter;
+ class JSScope;
+ class JSStack;
+ class MarkedAllocator;
class Register;
- class RegisterFile;
- class ScopeChainNode;
class StructureChain;
struct CallLinkInfo;
enum PropertyStubPutById_T { PropertyStubPutById };
struct PropertyStubCompilationInfo {
- enum Type { GetById, PutById, MethodCheck } m_type;
+ enum Type { GetById, PutById } m_type;
unsigned bytecodeIndex;
MacroAssembler::Call callReturnLocation;
MacroAssembler::Label hotPathBegin;
MacroAssembler::DataLabelPtr getStructureToCompare;
MacroAssembler::PatchableJump getStructureCheck;
+ MacroAssembler::ConvertibleLoadLabel propertyStorageLoad;
#if USE(JSVALUE64)
MacroAssembler::DataLabelCompact getDisplacementLabel;
#else
MacroAssembler::DataLabel32 putDisplacementLabel1;
MacroAssembler::DataLabel32 putDisplacementLabel2;
#endif
- MacroAssembler::DataLabelPtr methodCheckStructureToCompare;
- MacroAssembler::DataLabelPtr methodCheckProtoObj;
- MacroAssembler::DataLabelPtr methodCheckProtoStructureToCompare;
- MacroAssembler::DataLabelPtr methodCheckPutFunction;
#if !ASSERT_DISABLED
PropertyStubCompilationInfo()
#endif
- PropertyStubCompilationInfo(PropertyStubGetById_T, unsigned bytecodeIndex, MacroAssembler::Label hotPathBegin,
+ PropertyStubCompilationInfo(
+ PropertyStubGetById_T, unsigned bytecodeIndex, MacroAssembler::Label hotPathBegin,
+ MacroAssembler::DataLabelPtr structureToCompare,
+ MacroAssembler::PatchableJump structureCheck,
+ MacroAssembler::ConvertibleLoadLabel propertyStorageLoad,
#if USE(JSVALUE64)
- MacroAssembler::DataLabelPtr structureToCompare, MacroAssembler::PatchableJump structureCheck, MacroAssembler::DataLabelCompact displacementLabel, MacroAssembler::Label putResult)
+ MacroAssembler::DataLabelCompact displacementLabel,
#else
- MacroAssembler::DataLabelPtr structureToCompare, MacroAssembler::PatchableJump structureCheck, MacroAssembler::DataLabelCompact displacementLabel1, MacroAssembler::DataLabelCompact displacementLabel2, MacroAssembler::Label putResult)
+ MacroAssembler::DataLabelCompact displacementLabel1,
+ MacroAssembler::DataLabelCompact displacementLabel2,
#endif
+ MacroAssembler::Label putResult)
: m_type(GetById)
, bytecodeIndex(bytecodeIndex)
, hotPathBegin(hotPathBegin)
, getStructureToCompare(structureToCompare)
, getStructureCheck(structureCheck)
+ , propertyStorageLoad(propertyStorageLoad)
#if USE(JSVALUE64)
, getDisplacementLabel(displacementLabel)
#else
{
}
- PropertyStubCompilationInfo(PropertyStubPutById_T, unsigned bytecodeIndex, MacroAssembler::Label hotPathBegin,
+ PropertyStubCompilationInfo(
+ PropertyStubPutById_T, unsigned bytecodeIndex, MacroAssembler::Label hotPathBegin,
+ MacroAssembler::DataLabelPtr structureToCompare,
+ MacroAssembler::ConvertibleLoadLabel propertyStorageLoad,
#if USE(JSVALUE64)
- MacroAssembler::DataLabelPtr structureToCompare, MacroAssembler::DataLabel32 displacementLabel)
+ MacroAssembler::DataLabel32 displacementLabel
#else
- MacroAssembler::DataLabelPtr structureToCompare, MacroAssembler::DataLabel32 displacementLabel1, MacroAssembler::DataLabel32 displacementLabel2)
+ MacroAssembler::DataLabel32 displacementLabel1,
+ MacroAssembler::DataLabel32 displacementLabel2
#endif
+ )
: m_type(PutById)
, bytecodeIndex(bytecodeIndex)
, hotPathBegin(hotPathBegin)
+ , propertyStorageLoad(propertyStorageLoad)
, putStructureToCompare(structureToCompare)
#if USE(JSVALUE64)
, putDisplacementLabel(displacementLabel)
void slowCaseInfo(PropertyStubGetById_T, MacroAssembler::Label coldPathBegin, MacroAssembler::Call call)
{
- ASSERT(m_type == GetById || m_type == MethodCheck);
+ ASSERT(m_type == GetById);
callReturnLocation = call;
getColdPathBegin = coldPathBegin;
}
callReturnLocation = call;
}
- void addMethodCheckInfo(MacroAssembler::DataLabelPtr structureToCompare, MacroAssembler::DataLabelPtr protoObj, MacroAssembler::DataLabelPtr protoStructureToCompare, MacroAssembler::DataLabelPtr putFunction)
+ void copyToStubInfo(StructureStubInfo& info, LinkBuffer &patchBuffer);
+ };
+
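+ // Bookkeeping for a by-val access stub: the patchable type check, the profiled
+ // array mode, and the labels/return address needed to link the specialized
+ // get_by_val/put_by_val code after the main compilation pass.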
+ struct ByValCompilationInfo {
+ ByValCompilationInfo() { }
+
+ ByValCompilationInfo(unsigned bytecodeIndex, MacroAssembler::PatchableJump badTypeJump, JITArrayMode arrayMode, MacroAssembler::Label doneTarget)
+ : bytecodeIndex(bytecodeIndex)
+ , badTypeJump(badTypeJump)
+ , arrayMode(arrayMode)
+ , doneTarget(doneTarget)
{
- m_type = MethodCheck;
- methodCheckStructureToCompare = structureToCompare;
- methodCheckProtoObj = protoObj;
- methodCheckProtoStructureToCompare = protoStructureToCompare;
- methodCheckPutFunction = putFunction;
}
-
- void copyToStubInfo(StructureStubInfo& info, LinkBuffer &patchBuffer);
+
+ unsigned bytecodeIndex;
+ MacroAssembler::PatchableJump badTypeJump;
+ JITArrayMode arrayMode;
+ MacroAssembler::Label doneTarget;
+ MacroAssembler::Label slowPathTarget;
+ MacroAssembler::Call returnAddress;
};
struct StructureStubCompilationInfo {
unsigned bytecodeIndex;
};
- struct MethodCallCompilationInfo {
- MethodCallCompilationInfo(unsigned bytecodeIndex, unsigned propertyAccessIndex)
- : bytecodeIndex(bytecodeIndex)
- , propertyAccessIndex(propertyAccessIndex)
- {
- }
-
- unsigned bytecodeIndex;
- MacroAssembler::DataLabelPtr structureToCompare;
- unsigned propertyAccessIndex;
- };
-
// Near calls can only be patched to other JIT code, regular calls can be patched to JIT code or relinked to stub functions.
void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction);
void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction);
using MacroAssembler::JumpList;
using MacroAssembler::Label;
- static const int patchGetByIdDefaultStructure = -1;
+ static const uintptr_t patchGetByIdDefaultStructure = unusedPointer;
static const int patchGetByIdDefaultOffset = 0;
// Magic number - the initial offset must not be representable as a signed 8-bit value, or the X86Assembler
// will compress the displacement, and we may not be able to fit a patched offset.
static const int patchPutByIdDefaultOffset = 256;
public:
- static JITCode compile(JSGlobalData* globalData, CodeBlock* codeBlock, JITCompilationEffort effort, CodePtr* functionEntryArityCheck = 0)
+ static JITCode compile(VM* vm, CodeBlock* codeBlock, JITCompilationEffort effort, CodePtr* functionEntryArityCheck = 0)
+ {
+ return JIT(vm, codeBlock).privateCompile(functionEntryArityCheck, effort);
+ }
+
+ static void compileClosureCall(VM* vm, CallLinkInfo* callLinkInfo, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, Structure* expectedStructure, ExecutableBase* expectedExecutable, MacroAssemblerCodePtr codePtr)
{
- return JIT(globalData, codeBlock).privateCompile(functionEntryArityCheck, effort);
+ JIT jit(vm, callerCodeBlock);
+ jit.m_bytecodeOffset = callLinkInfo->codeOrigin.bytecodeIndex;
+ jit.privateCompileClosureCall(callLinkInfo, calleeCodeBlock, expectedStructure, expectedExecutable, codePtr);
}
- static void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress)
+ static void compileGetByIdProto(VM* vm, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress)
{
- JIT jit(globalData, codeBlock);
+ JIT jit(vm, codeBlock);
jit.m_bytecodeOffset = stubInfo->bytecodeIndex;
jit.privateCompileGetByIdProto(stubInfo, structure, prototypeStructure, ident, slot, cachedOffset, returnAddress, callFrame);
}
- static void compileGetByIdSelfList(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
+ static void compileGetByIdSelfList(VM* vm, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset)
{
- JIT jit(globalData, codeBlock);
+ JIT jit(vm, codeBlock);
jit.m_bytecodeOffset = stubInfo->bytecodeIndex;
jit.privateCompileGetByIdSelfList(stubInfo, polymorphicStructures, currentIndex, structure, ident, slot, cachedOffset);
}
- static void compileGetByIdProtoList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
+ static void compileGetByIdProtoList(VM* vm, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset)
{
- JIT jit(globalData, codeBlock);
+ JIT jit(vm, codeBlock);
jit.m_bytecodeOffset = stubInfo->bytecodeIndex;
jit.privateCompileGetByIdProtoList(stubInfo, prototypeStructureList, currentIndex, structure, prototypeStructure, ident, slot, cachedOffset, callFrame);
}
- static void compileGetByIdChainList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
+ static void compileGetByIdChainList(VM* vm, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset)
{
- JIT jit(globalData, codeBlock);
+ JIT jit(vm, codeBlock);
jit.m_bytecodeOffset = stubInfo->bytecodeIndex;
jit.privateCompileGetByIdChainList(stubInfo, prototypeStructureList, currentIndex, structure, chain, count, ident, slot, cachedOffset, callFrame);
}
- static void compileGetByIdChain(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress)
+ static void compileGetByIdChain(VM* vm, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress)
{
- JIT jit(globalData, codeBlock);
+ JIT jit(vm, codeBlock);
jit.m_bytecodeOffset = stubInfo->bytecodeIndex;
jit.privateCompileGetByIdChain(stubInfo, structure, chain, count, ident, slot, cachedOffset, returnAddress, callFrame);
}
- static void compilePutByIdTransition(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
+ static void compilePutByIdTransition(VM* vm, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, PropertyOffset cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
{
- JIT jit(globalData, codeBlock);
+ JIT jit(vm, codeBlock);
jit.m_bytecodeOffset = stubInfo->bytecodeIndex;
jit.privateCompilePutByIdTransition(stubInfo, oldStructure, newStructure, cachedOffset, chain, returnAddress, direct);
}
+
+ static void compileGetByVal(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
+ {
+ JIT jit(vm, codeBlock);
+ jit.m_bytecodeOffset = byValInfo->bytecodeIndex;
+ jit.privateCompileGetByVal(byValInfo, returnAddress, arrayMode);
+ }
- static PassRefPtr<ExecutableMemoryHandle> compileCTIMachineTrampolines(JSGlobalData* globalData, TrampolineStructure *trampolines)
+ static void compilePutByVal(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
- if (!globalData->canUseJIT())
- return 0;
- JIT jit(globalData, 0);
- return jit.privateCompileCTIMachineTrampolines(globalData, trampolines);
+ JIT jit(vm, codeBlock);
+ jit.m_bytecodeOffset = byValInfo->bytecodeIndex;
+ jit.privateCompilePutByVal(byValInfo, returnAddress, arrayMode);
}
- static CodeRef compileCTINativeCall(JSGlobalData* globalData, NativeFunction func)
+ static CodeRef compileCTINativeCall(VM* vm, NativeFunction func)
{
- if (!globalData->canUseJIT()) {
+ if (!vm->canUseJIT()) {
#if ENABLE(LLINT)
return CodeRef::createLLIntCodeRef(llint_native_call_trampoline);
#else
return CodeRef();
#endif
}
- JIT jit(globalData, 0);
- return jit.privateCompileCTINativeCall(globalData, func);
+ JIT jit(vm, 0);
+ return jit.privateCompileCTINativeCall(vm, func);
}
static void resetPatchGetById(RepatchBuffer&, StructureStubInfo*);
static void resetPatchPutById(RepatchBuffer&, StructureStubInfo*);
- static void patchGetByIdSelf(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress);
- static void patchPutByIdReplace(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct);
- static void patchMethodCallProto(JSGlobalData&, CodeBlock* codeblock, MethodCallLinkInfo&, StructureStubInfo&, JSObject*, Structure*, JSObject*, ReturnAddressPtr);
+ static void patchGetByIdSelf(CodeBlock*, StructureStubInfo*, Structure*, PropertyOffset cachedOffset, ReturnAddressPtr);
+ static void patchPutByIdReplace(CodeBlock*, StructureStubInfo*, Structure*, PropertyOffset cachedOffset, ReturnAddressPtr, bool direct);
- static void compilePatchGetArrayLength(JSGlobalData* globalData, CodeBlock* codeBlock, ReturnAddressPtr returnAddress)
+ static void compilePatchGetArrayLength(VM* vm, CodeBlock* codeBlock, ReturnAddressPtr returnAddress)
{
- JIT jit(globalData, codeBlock);
+ JIT jit(vm, codeBlock);
+#if ENABLE(DFG_JIT)
+ // Force profiling to be enabled during stub generation.
+ jit.m_canBeOptimized = true;
+ jit.m_canBeOptimizedOrInlined = true;
+ jit.m_shouldEmitProfiling = true;
+#endif // ENABLE(DFG_JIT)
return jit.privateCompilePatchGetArrayLength(returnAddress);
}
- static void linkFor(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, CodePtr, CallLinkInfo*, JSGlobalData*, CodeSpecializationKind);
+ static void linkFor(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, CodePtr, CallLinkInfo*, VM*, CodeSpecializationKind);
+ static void linkSlowCall(CodeBlock* callerCodeBlock, CallLinkInfo*);
private:
- JIT(JSGlobalData*, CodeBlock* = 0);
+ JIT(VM*, CodeBlock* = 0);
void privateCompileMainPass();
void privateCompileLinkPass();
void privateCompileSlowCases();
JITCode privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffort);
- void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
- void privateCompileGetByIdSelfList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, const Identifier&, const PropertySlot&, size_t cachedOffset);
- void privateCompileGetByIdProtoList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, size_t cachedOffset, CallFrame* callFrame);
- void privateCompileGetByIdChainList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, StructureChain* chain, size_t count, const Identifier&, const PropertySlot&, size_t cachedOffset, CallFrame* callFrame);
- void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, const Identifier&, const PropertySlot&, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
- void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, size_t cachedOffset, StructureChain*, ReturnAddressPtr returnAddress, bool direct);
-
- PassRefPtr<ExecutableMemoryHandle> privateCompileCTIMachineTrampolines(JSGlobalData*, TrampolineStructure*);
- Label privateCompileCTINativeCall(JSGlobalData*, bool isConstruct = false);
- CodeRef privateCompileCTINativeCall(JSGlobalData*, NativeFunction);
+
+ void privateCompileClosureCall(CallLinkInfo*, CodeBlock* calleeCodeBlock, Structure*, ExecutableBase*, MacroAssemblerCodePtr);
+
+ void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset, ReturnAddressPtr, CallFrame*);
+ void privateCompileGetByIdSelfList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset);
+ void privateCompileGetByIdProtoList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset, CallFrame*);
+ void privateCompileGetByIdChainList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, StructureChain*, size_t count, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset, CallFrame*);
+ void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset, ReturnAddressPtr, CallFrame*);
+ void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, PropertyOffset cachedOffset, StructureChain*, ReturnAddressPtr, bool direct);
+
+ void privateCompileGetByVal(ByValInfo*, ReturnAddressPtr, JITArrayMode);
+ void privateCompilePutByVal(ByValInfo*, ReturnAddressPtr, JITArrayMode);
+
+ Label privateCompileCTINativeCall(VM*, bool isConstruct = false);
+ CodeRef privateCompileCTINativeCall(VM*, NativeFunction);
void privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress);
static bool isDirectPutById(StructureStubInfo*);
void emitLoadDouble(int index, FPRegisterID value);
void emitLoadInt32ToDouble(int index, FPRegisterID value);
Jump emitJumpIfNotObject(RegisterID structureReg);
- Jump emitJumpIfNotType(RegisterID baseReg, RegisterID scratchReg, JSType);
- void testPrototype(JSValue, JumpList& failureCases);
+ Jump addStructureTransitionCheck(JSCell*, Structure*, StructureStubInfo*, RegisterID scratch);
+ void addStructureTransitionCheck(JSCell*, Structure*, StructureStubInfo*, JumpList& failureCases, RegisterID scratch);
+ void testPrototype(JSValue, JumpList& failureCases, StructureStubInfo*);
enum WriteBarrierMode { UnconditionalWriteBarrier, ShouldFilterImmediates };
// value register in write barrier is used before any scratch registers
void emitWriteBarrier(RegisterID owner, RegisterID valueTag, RegisterID scratch, RegisterID scratch2, WriteBarrierMode, WriteBarrierUseKind);
void emitWriteBarrier(JSCell* owner, RegisterID value, RegisterID scratch, WriteBarrierMode, WriteBarrierUseKind);
- template<typename ClassType, bool destructor, typename StructureType> void emitAllocateBasicJSObject(StructureType, RegisterID result, RegisterID storagePtr);
- void emitAllocateBasicStorage(size_t, RegisterID result, RegisterID storagePtr);
- template<typename T> void emitAllocateJSFinalObject(T structure, RegisterID result, RegisterID storagePtr);
- void emitAllocateJSFunction(FunctionExecutable*, RegisterID scopeChain, RegisterID result, RegisterID storagePtr);
- void emitAllocateJSArray(unsigned valuesRegister, unsigned length, RegisterID cellResult, RegisterID storageResult, RegisterID storagePtr);
+ template<typename StructureType> // StructureType can be RegisterID or ImmPtr.
+ void emitAllocateJSObject(RegisterID allocator, StructureType, RegisterID result, RegisterID scratch);
#if ENABLE(VALUE_PROFILER)
// This assumes that the value to profile is in regT0 and that regT3 is available for
// scratch.
void emitValueProfilingSite(unsigned) { }
void emitValueProfilingSite() { }
#endif
+ void emitArrayProfilingSite(RegisterID structureAndIndexingType, RegisterID scratch, ArrayProfile*);
+ void emitArrayProfilingSiteForBytecodeIndex(RegisterID structureAndIndexingType, RegisterID scratch, unsigned bytecodeIndex);
+ void emitArrayProfileStoreToHoleSpecialCase(ArrayProfile*);
+ void emitArrayProfileOutOfBoundsSpecialCase(ArrayProfile*);
+
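+ // Picks the JITArrayMode to specialize for, based on the shapes recorded in the ArrayProfile.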
+ JITArrayMode chooseArrayMode(ArrayProfile*);
+
+ // Property is in regT1, base is in regT0. regT2 contains indexing type.
+ // Property is int-checked and zero extended. Base is cell checked.
+ // Structure is already profiled. Returns the slow cases. Fall-through
+ // case contains result in regT0, and it is not yet profiled.
+ JumpList emitInt32GetByVal(Instruction* instruction, PatchableJump& badType) { return emitContiguousGetByVal(instruction, badType, Int32Shape); }
+ JumpList emitDoubleGetByVal(Instruction*, PatchableJump& badType);
+ JumpList emitContiguousGetByVal(Instruction*, PatchableJump& badType, IndexingType expectedShape = ContiguousShape);
+ JumpList emitArrayStorageGetByVal(Instruction*, PatchableJump& badType);
+ JumpList emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor&, size_t elementSize, TypedArraySignedness);
+ JumpList emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor&, size_t elementSize);
+
+ // Property is in regT1, base is in regT0. regT2 contains the indexing type.
+ // The value to store is not yet loaded. Property is int-checked and
+ // zero-extended. Base is cell checked. Structure is already profiled.
+ // Returns the slow cases.
+ JumpList emitInt32PutByVal(Instruction* currentInstruction, PatchableJump& badType)
+ {
+ return emitGenericContiguousPutByVal(currentInstruction, badType, Int32Shape);
+ }
+ JumpList emitDoublePutByVal(Instruction* currentInstruction, PatchableJump& badType)
+ {
+ return emitGenericContiguousPutByVal(currentInstruction, badType, DoubleShape);
+ }
+ JumpList emitContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType)
+ {
+ return emitGenericContiguousPutByVal(currentInstruction, badType);
+ }
+ JumpList emitGenericContiguousPutByVal(Instruction*, PatchableJump& badType, IndexingType indexingShape = ContiguousShape);
+ JumpList emitArrayStoragePutByVal(Instruction*, PatchableJump& badType);
+ JumpList emitIntTypedArrayPutByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor&, size_t elementSize, TypedArraySignedness, TypedArrayRounding);
+ JumpList emitFloatTypedArrayPutByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor&, size_t elementSize);
+
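+ // MayBeFinal: the base may be a final object keeping properties inline, so offset
+ // loads must handle both inline and out-of-line storage; KnownNotFinal skips the inline case.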
+ enum FinalObjectMode { MayBeFinal, KnownNotFinal };
#if USE(JSVALUE32_64)
bool getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant);
bool isMapped(int virtualRegisterIndex);
bool getMappedPayload(int virtualRegisterIndex, RegisterID& payload);
bool getMappedTag(int virtualRegisterIndex, RegisterID& tag);
-
+
void emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex);
void emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex, RegisterID tag);
- void compileGetByIdHotPath();
- void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck = false);
- void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset);
- void compileGetDirectOffset(JSObject* base, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset);
- void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID offset);
- void compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, size_t cachedOffset);
+ void compileGetByIdHotPath(Identifier*);
+ void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier*, Vector<SlowCaseEntry>::iterator&);
+ void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, PropertyOffset cachedOffset);
+ void compileGetDirectOffset(JSObject* base, RegisterID resultTag, RegisterID resultPayload, PropertyOffset cachedOffset);
+ void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID offset, FinalObjectMode = MayBeFinal);
+ void compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, PropertyOffset cachedOffset);
// Arithmetic opcode helpers
void emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType);
// sequenceOpCall
static const int sequenceOpCallInstructionSpace = 12;
static const int sequenceOpCallConstantSpace = 2;
- // sequenceMethodCheck
- static const int sequenceMethodCheckInstructionSpace = 40;
- static const int sequenceMethodCheckConstantSpace = 6;
// sequenceGetByIdHotPath
static const int sequenceGetByIdHotPathInstructionSpace = 36;
static const int sequenceGetByIdHotPathConstantSpace = 4;
// sequenceGetByIdSlowCase
- static const int sequenceGetByIdSlowCaseInstructionSpace = 64;
+ static const int sequenceGetByIdSlowCaseInstructionSpace = 80;
static const int sequenceGetByIdSlowCaseConstantSpace = 4;
// sequencePutById
static const int sequencePutByIdInstructionSpace = 36;
// sequenceOpCall
static const int sequenceOpCallInstructionSpace = 12;
static const int sequenceOpCallConstantSpace = 2;
- // sequenceMethodCheck
- static const int sequenceMethodCheckInstructionSpace = 40;
- static const int sequenceMethodCheckConstantSpace = 6;
// sequenceGetByIdHotPath
static const int sequenceGetByIdHotPathInstructionSpace = 36;
static const int sequenceGetByIdHotPathConstantSpace = 5;
#endif
#else // USE(JSVALUE32_64)
+ /* This function is deprecated. */
+ void emitGetJITStubArg(unsigned argumentNumber, RegisterID dst);
+
void emitGetVirtualRegister(int src, RegisterID dst);
void emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2);
void emitPutVirtualRegister(unsigned dst, RegisterID from = regT0);
Jump emitJumpIfJSCell(RegisterID);
Jump emitJumpIfBothJSCells(RegisterID, RegisterID, RegisterID);
void emitJumpSlowCaseIfJSCell(RegisterID);
- Jump emitJumpIfNotJSCell(RegisterID);
void emitJumpSlowCaseIfNotJSCell(RegisterID);
void emitJumpSlowCaseIfNotJSCell(RegisterID, int VReg);
Jump emitJumpIfImmediateInteger(RegisterID);
void emitJumpSlowCaseIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
void emitFastArithReTagImmediate(RegisterID src, RegisterID dest);
- void emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest);
void emitTagAsBoolImmediate(RegisterID reg);
void compileBinaryArithOp(OpcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes opi);
void compileBinaryArithOpSlowCase(OpcodeID, Vector<SlowCaseEntry>::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase);
void compileGetByIdHotPath(int baseVReg, Identifier*);
- void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck = false);
- void compileGetDirectOffset(RegisterID base, RegisterID result, size_t cachedOffset);
- void compileGetDirectOffset(JSObject* base, RegisterID result, size_t cachedOffset);
- void compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch);
- void compilePutDirectOffset(RegisterID base, RegisterID value, size_t cachedOffset);
+ void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier*, Vector<SlowCaseEntry>::iterator&);
+ void compileGetDirectOffset(RegisterID base, RegisterID result, PropertyOffset cachedOffset);
+ void compileGetDirectOffset(JSObject* base, RegisterID result, PropertyOffset cachedOffset);
+ void compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch, FinalObjectMode = MayBeFinal);
+ void compilePutDirectOffset(RegisterID base, RegisterID value, PropertyOffset cachedOffset);
#endif // USE(JSVALUE32_64)
void emit_op_get_by_val(Instruction*);
void emit_op_get_argument_by_val(Instruction*);
void emit_op_get_by_pname(Instruction*);
- void emit_op_get_global_var(Instruction*);
- void emit_op_get_scoped_var(Instruction*);
void emit_op_init_lazy_reg(Instruction*);
void emit_op_check_has_instance(Instruction*);
void emit_op_instanceof(Instruction*);
void emit_op_jeq_null(Instruction*);
void emit_op_jfalse(Instruction*);
void emit_op_jmp(Instruction*);
- void emit_op_jmp_scopes(Instruction*);
void emit_op_jneq_null(Instruction*);
void emit_op_jneq_ptr(Instruction*);
void emit_op_jless(Instruction*);
void emit_op_jngreater(Instruction*);
void emit_op_jngreatereq(Instruction*);
void emit_op_jtrue(Instruction*);
- void emit_op_loop(Instruction*);
void emit_op_loop_hint(Instruction*);
- void emit_op_loop_if_less(Instruction*);
- void emit_op_loop_if_lesseq(Instruction*);
- void emit_op_loop_if_greater(Instruction*);
- void emit_op_loop_if_greatereq(Instruction*);
- void emit_op_loop_if_true(Instruction*);
- void emit_op_loop_if_false(Instruction*);
void emit_op_lshift(Instruction*);
- void emit_op_method_check(Instruction*);
void emit_op_mod(Instruction*);
void emit_op_mov(Instruction*);
void emit_op_mul(Instruction*);
void emit_op_neq(Instruction*);
void emit_op_neq_null(Instruction*);
void emit_op_new_array(Instruction*);
+ void emit_op_new_array_with_size(Instruction*);
void emit_op_new_array_buffer(Instruction*);
void emit_op_new_func(Instruction*);
void emit_op_new_func_exp(Instruction*);
void emit_op_not(Instruction*);
void emit_op_nstricteq(Instruction*);
void emit_op_pop_scope(Instruction*);
- void emit_op_post_dec(Instruction*);
- void emit_op_post_inc(Instruction*);
- void emit_op_pre_dec(Instruction*);
- void emit_op_pre_inc(Instruction*);
+ void emit_op_dec(Instruction*);
+ void emit_op_inc(Instruction*);
void emit_op_profile_did_call(Instruction*);
void emit_op_profile_will_call(Instruction*);
- void emit_op_push_new_scope(Instruction*);
- void emit_op_push_scope(Instruction*);
+ void emit_op_push_name_scope(Instruction*);
+ void emit_op_push_with_scope(Instruction*);
void emit_op_put_by_id(Instruction*);
void emit_op_put_by_index(Instruction*);
void emit_op_put_by_val(Instruction*);
void emit_op_put_getter_setter(Instruction*);
- void emit_op_put_global_var(Instruction*);
- void emit_op_put_scoped_var(Instruction*);
+ void emit_op_init_global_const(Instruction*);
+ void emit_op_init_global_const_check(Instruction*);
+ void emit_resolve_operations(ResolveOperations*, const int* base, const int* value);
+ void emitSlow_link_resolve_operations(ResolveOperations*, Vector<SlowCaseEntry>::iterator&);
void emit_op_resolve(Instruction*);
void emit_op_resolve_base(Instruction*);
- void emit_op_ensure_property_exists(Instruction*);
- void emit_op_resolve_global(Instruction*, bool dynamic = false);
- void emit_op_resolve_global_dynamic(Instruction*);
- void emit_op_resolve_skip(Instruction*);
void emit_op_resolve_with_base(Instruction*);
void emit_op_resolve_with_this(Instruction*);
+ void emit_op_put_to_base(Instruction*);
void emit_op_ret(Instruction*);
void emit_op_ret_object_or_this(Instruction*);
void emit_op_rshift(Instruction*);
void emit_op_tear_off_activation(Instruction*);
void emit_op_tear_off_arguments(Instruction*);
void emit_op_throw(Instruction*);
- void emit_op_throw_reference_error(Instruction*);
- void emit_op_to_jsnumber(Instruction*);
+ void emit_op_throw_static_error(Instruction*);
+ void emit_op_to_number(Instruction*);
void emit_op_to_primitive(Instruction*);
void emit_op_unexpected_load(Instruction*);
void emit_op_urshift(Instruction*);
+ void emit_op_get_scoped_var(Instruction*);
+ void emit_op_put_scoped_var(Instruction*);
void emitSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_bitand(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_jngreater(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_jngreatereq(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_jtrue(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_loop_if_less(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_loop_if_lesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_loop_if_greater(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_loop_if_greatereq(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_loop_if_true(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_loop_if_false(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_loop_hint(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_lshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_negate(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_new_object(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_not(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_nstricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_post_dec(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_post_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_pre_dec(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_pre_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_dec(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_resolve_global(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_resolve_global_dynamic(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_init_global_const_check(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_rshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_stricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_to_jsnumber(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_to_number(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_to_primitive(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_urshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_new_func(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_new_func_exp(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_new_array(Instruction*, Vector<SlowCaseEntry>::iterator&);
-
+
+ void emitSlow_op_resolve(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_resolve_base(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_resolve_with_base(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_resolve_with_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_put_to_base(Instruction*, Vector<SlowCaseEntry>::iterator&);
+
void emitRightShift(Instruction*, bool isUnsigned);
void emitRightShiftSlowCase(Instruction*, Vector<SlowCaseEntry>::iterator&, bool isUnsigned);
- /* This function is deprecated. */
- void emitGetJITStubArg(unsigned argumentNumber, RegisterID dst);
-
void emitInitRegister(unsigned dst);
- void emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry);
- void emitPutCellToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry);
- void emitPutIntToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry);
- void emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry);
- void emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from = callFrameRegister);
- void emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from = callFrameRegister);
+ void emitPutIntToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry);
+ void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister);
+ void emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister);
+#if USE(JSVALUE64)
+ void emitGetFromCallFrameHeader64(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister);
+#endif
JSValue getConstantOperand(unsigned src);
bool isOperandConstantImmediateInt(unsigned src);
Jump checkStructure(RegisterID reg, Structure* structure);
- void restoreArgumentReference();
void restoreArgumentReferenceForTrampoline();
void updateTopCallFrame();
Call emitNakedCall(CodePtr function = CodePtr());
- void preserveReturnAddressAfterCall(RegisterID);
- void restoreReturnAddressBeforeReturn(RegisterID);
- void restoreReturnAddressBeforeReturn(Address);
-
// Loads the character value of a single character string into dst.
void emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures);
- enum OptimizationCheckKind { LoopOptimizationCheck, RetOptimizationCheck };
#if ENABLE(DFG_JIT)
- void emitOptimizationCheck(OptimizationCheckKind);
+ void emitEnterOptimizationCheck();
#else
- void emitOptimizationCheck(OptimizationCheckKind) { }
+ void emitEnterOptimizationCheck() { }
#endif
-
- void emitTimeoutCheck();
+
#ifndef NDEBUG
void printBytecodeOperandTypes(unsigned src1, unsigned src2);
#endif
#if ENABLE(DFG_JIT)
bool canBeOptimized() { return m_canBeOptimized; }
- bool shouldEmitProfiling() { return m_canBeOptimized; }
+ bool canBeOptimizedOrInlined() { return m_canBeOptimizedOrInlined; }
+ bool shouldEmitProfiling() { return m_shouldEmitProfiling; }
#else
bool canBeOptimized() { return false; }
+ bool canBeOptimizedOrInlined() { return false; }
- // Enables use of value profiler with tiered compilation turned off,
- // in which case all code gets profiled.
- bool shouldEmitProfiling() { return true; }
+ // Without the DFG there is no tiered compilation, so profiling sites are not emitted.
+ bool shouldEmitProfiling() { return false; }
#endif
Interpreter* m_interpreter;
- JSGlobalData* m_globalData;
+ VM* m_vm;
CodeBlock* m_codeBlock;
Vector<CallRecord> m_calls;
Vector<Label> m_labels;
Vector<PropertyStubCompilationInfo> m_propertyAccessCompilationInfo;
+ Vector<ByValCompilationInfo> m_byValCompilationInfo;
Vector<StructureStubCompilationInfo> m_callStructureStubCompilationInfo;
- Vector<MethodCallCompilationInfo> m_methodCallCompilationInfo;
Vector<JumpTable> m_jmpTable;
unsigned m_bytecodeOffset;
Vector<SwitchRecord> m_switches;
unsigned m_propertyAccessInstructionIndex;
+ unsigned m_byValInstructionIndex;
unsigned m_globalResolveInfoIndex;
unsigned m_callLinkInfoIndex;
int m_uninterruptedConstantSequenceBegin;
#endif
#endif
+ OwnPtr<JITDisassembler> m_disassembler;
+ RefPtr<Profiler::Compilation> m_compilation;
WeakRandom m_randomGenerator;
- static CodeRef stringGetByValStubGenerator(JSGlobalData*);
+ static CodeRef stringGetByValStubGenerator(VM*);
#if ENABLE(VALUE_PROFILER)
bool m_canBeOptimized;
+ bool m_canBeOptimizedOrInlined;
+ bool m_shouldEmitProfiling;
#endif
} JIT_CLASS_ALIGNMENT;
- inline void JIT::emit_op_loop(Instruction* currentInstruction)
- {
- emitTimeoutCheck();
- emit_op_jmp(currentInstruction);
- }
-
- inline void JIT::emit_op_loop_hint(Instruction*)
- {
- emitOptimizationCheck(LoopOptimizationCheck);
- }
-
- inline void JIT::emit_op_loop_if_true(Instruction* currentInstruction)
- {
- emitTimeoutCheck();
- emit_op_jtrue(currentInstruction);
- }
-
- inline void JIT::emitSlow_op_loop_if_true(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
- {
- emitSlow_op_jtrue(currentInstruction, iter);
- }
-
- inline void JIT::emit_op_loop_if_false(Instruction* currentInstruction)
- {
- emitTimeoutCheck();
- emit_op_jfalse(currentInstruction);
- }
-
- inline void JIT::emitSlow_op_loop_if_false(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
- {
- emitSlow_op_jfalse(currentInstruction, iter);
- }
-
- inline void JIT::emit_op_loop_if_less(Instruction* currentInstruction)
- {
- emitTimeoutCheck();
- emit_op_jless(currentInstruction);
- }
-
- inline void JIT::emitSlow_op_loop_if_less(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
- {
- emitSlow_op_jless(currentInstruction, iter);
- }
-
- inline void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
- {
- emitTimeoutCheck();
- emit_op_jlesseq(currentInstruction);
- }
-
- inline void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
- {
- emitSlow_op_jlesseq(currentInstruction, iter);
- }
-
- inline void JIT::emit_op_loop_if_greater(Instruction* currentInstruction)
- {
- emitTimeoutCheck();
- emit_op_jgreater(currentInstruction);
- }
-
- inline void JIT::emitSlow_op_loop_if_greater(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
- {
- emitSlow_op_jgreater(currentInstruction, iter);
- }
-
- inline void JIT::emit_op_loop_if_greatereq(Instruction* currentInstruction)
- {
- emitTimeoutCheck();
- emit_op_jgreatereq(currentInstruction);
- }
-
- inline void JIT::emitSlow_op_loop_if_greatereq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
- {
- emitSlow_op_jgreatereq(currentInstruction, iter);
- }
-
} // namespace JSC
#endif // ENABLE(JIT)