/*
- * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
#include "CodeBlockWithJITType.h"
#include "DFGAbstractInterpreterInlines.h"
#include "DFGInPlaceAbstractState.h"
+#include "DFGOSRAvailabilityAnalysisPhase.h"
+#include "DFGOSRExitFuzz.h"
+#include "DirectArguments.h"
#include "FTLAbstractHeapRepository.h"
#include "FTLAvailableRecovery.h"
#include "FTLForOSREntryJITCode.h"
#include "FTLFormattedValue.h"
#include "FTLInlineCacheSize.h"
#include "FTLLoweredNodeValue.h"
+#include "FTLOperations.h"
#include "FTLOutput.h"
#include "FTLThunks.h"
#include "FTLWeightedTarget.h"
-#include "OperandsInlines.h"
#include "JSCInlines.h"
+#include "JSLexicalEnvironment.h"
+#include "OperandsInlines.h"
+#include "ScopedArguments.h"
+#include "ScopedArgumentsTable.h"
#include "VirtualRegister.h"
#include <atomic>
+#include <dlfcn.h>
+#include <llvm/InitializeLLVM.h>
+#include <unordered_set>
#include <wtf/ProcessID.h>
+#if ENABLE(FTL_NATIVE_CALL_INLINING)
+#include "BundlePath.h"
+#endif
+
namespace JSC { namespace FTL {
using namespace DFG;
-static std::atomic<int> compileCounter;
+namespace {
+
+std::atomic<int> compileCounter;
+
+#if ASSERT_DISABLED
+NO_RETURN_DUE_TO_CRASH static void ftlUnreachable()
+{
+ CRASH();
+}
+#else
+NO_RETURN_DUE_TO_CRASH static void ftlUnreachable(
+ CodeBlock* codeBlock, BlockIndex blockIndex, unsigned nodeIndex)
+{
+ dataLog("Crashing in thought-to-be-unreachable FTL-generated code for ", pointerDump(codeBlock), " at basic block #", blockIndex);
+ if (nodeIndex != UINT_MAX)
+ dataLog(", node @", nodeIndex);
+ dataLog(".\n");
+ CRASH();
+}
+#endif
// Using this instead of typeCheck() helps to reduce the load on LLVM, by creating
// significantly less dead code.
LowerDFGToLLVM(State& state)
: m_graph(state.graph)
, m_ftlState(state)
- , m_loweringSucceeded(true)
, m_heaps(state.context)
, m_out(state.context)
- , m_availability(OperandsLike, state.graph.block(0)->variablesAtHead)
, m_state(state.graph)
, m_interpreter(state.graph, m_state)
, m_stackmapIDs(0)
+ , m_tbaaKind(mdKindID(state.context, "tbaa"))
+ , m_tbaaStructKind(mdKindID(state.context, "tbaa.struct"))
{
}
-
-
-#define LOWERING_FAILED(node, reason) \
- loweringFailed((node), __FILE__, __LINE__, WTF_PRETTY_FUNCTION, (reason));
-
- bool lower()
+
+ void lower()
{
CString name;
if (verboseCompilationEnabled()) {
m_graph.m_dominators.computeIfNecessary(m_graph);
m_ftlState.module =
- llvm->ModuleCreateWithNameInContext(name.data(), m_ftlState.context);
+ moduleCreateWithNameInContext(name.data(), m_ftlState.context);
m_ftlState.function = addFunction(
m_ftlState.module, name.data(), functionType(m_out.int64));
m_prologue = FTL_NEW_BLOCK(m_out, ("Prologue"));
LBasicBlock stackOverflow = FTL_NEW_BLOCK(m_out, ("Stack overflow"));
m_handleExceptions = FTL_NEW_BLOCK(m_out, ("Handle Exceptions"));
+
+ LBasicBlock checkArguments = FTL_NEW_BLOCK(m_out, ("Check arguments"));
for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
m_highBlock = m_graph.block(blockIndex);
m_out.appendTo(m_prologue, stackOverflow);
createPhiVariables();
+
+ auto preOrder = m_graph.blocksInPreOrder();
+
+ int maxNumberOfArguments = -1;
+ for (BasicBlock* block : preOrder) {
+ for (unsigned nodeIndex = block->size(); nodeIndex--; ) {
+ Node* node = block->at(nodeIndex);
+ switch (node->op()) {
+ case NativeCall:
+ case NativeConstruct: {
+ int numArgs = node->numChildren();
+ if (numArgs > maxNumberOfArguments)
+ maxNumberOfArguments = numArgs;
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ }
+
+ if (maxNumberOfArguments >= 0) {
+ m_execState = m_out.alloca(arrayType(m_out.int64, JSStack::CallFrameHeaderSize + maxNumberOfArguments));
+ m_execStorage = m_out.ptrToInt(m_execState, m_out.intPtr);
+ }
+
LValue capturedAlloca = m_out.alloca(arrayType(m_out.int64, m_graph.m_nextMachineLocal));
+
m_captured = m_out.add(
m_out.ptrToInt(capturedAlloca, m_out.intPtr),
m_out.constIntPtr(m_graph.m_nextMachineLocal * sizeof(Register)));
- // We should not create any alloca's after this point, since they will cease to
- // be mem2reg candidates.
-
m_ftlState.capturedStackmapID = m_stackmapIDs++;
m_out.call(
m_out.stackmapIntrinsic(), m_out.constInt64(m_ftlState.capturedStackmapID),
m_out.int32Zero, capturedAlloca);
+ // If we have any CallVarargs then we need to have a spill slot for it.
+ bool hasVarargs = false;
+ for (BasicBlock* block : preOrder) {
+ for (Node* node : *block) {
+ switch (node->op()) {
+ case CallVarargs:
+ case CallForwardVarargs:
+ case ConstructVarargs:
+ case ConstructForwardVarargs:
+ hasVarargs = true;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ if (hasVarargs) {
+ LValue varargsSpillSlots = m_out.alloca(
+ arrayType(m_out.int64, JSCallVarargs::numSpillSlotsNeeded()));
+ m_ftlState.varargsSpillSlotsStackmapID = m_stackmapIDs++;
+ m_out.call(
+ m_out.stackmapIntrinsic(), m_out.constInt64(m_ftlState.varargsSpillSlotsStackmapID),
+ m_out.int32Zero, varargsSpillSlots);
+ }
+
+ // We should not create any alloca's after this point, since they will cease to
+ // be mem2reg candidates.
+
m_callFrame = m_out.ptrToInt(
m_out.call(m_out.frameAddressIntrinsic(), m_out.int32Zero), m_out.intPtr);
m_tagTypeNumber = m_out.constInt64(TagTypeNumber);
m_out.storePtr(m_out.constIntPtr(codeBlock()), addressFor(JSStack::CodeBlock));
m_out.branch(
- didOverflowStack(), rarely(stackOverflow), usually(lowBlock(m_graph.block(0))));
+ didOverflowStack(), rarely(stackOverflow), usually(checkArguments));
m_out.appendTo(stackOverflow, m_handleExceptions);
m_out.call(m_out.operation(operationThrowStackOverflowError), m_callFrame, m_out.constIntPtr(codeBlock()));
m_out.constInt32(MacroAssembler::maxJumpReplacementSize()));
m_out.unreachable();
- m_out.appendTo(m_handleExceptions, lowBlock(m_graph.block(0)));
+ m_out.appendTo(m_handleExceptions, checkArguments);
m_ftlState.handleExceptionStackmapID = m_stackmapIDs++;
m_out.call(
m_out.stackmapIntrinsic(), m_out.constInt64(m_ftlState.handleExceptionStackmapID),
m_out.constInt32(MacroAssembler::maxJumpReplacementSize()));
m_out.unreachable();
- if (!m_loweringSucceeded)
- return m_loweringSucceeded;
-
- Vector<BasicBlock*> depthFirst;
- m_graph.getBlocksInDepthFirstOrder(depthFirst);
- for (unsigned i = 0; i < depthFirst.size(); ++i) {
- compileBlock(depthFirst[i]);
- if (!m_loweringSucceeded)
- return m_loweringSucceeded;
+ m_out.appendTo(checkArguments, lowBlock(m_graph.block(0)));
+ availabilityMap().clear();
+ availabilityMap().m_locals = Operands<Availability>(codeBlock()->numParameters(), 0);
+ for (unsigned i = codeBlock()->numParameters(); i--;) {
+ availabilityMap().m_locals.argument(i) =
+ Availability(FlushedAt(FlushedJSValue, virtualRegisterForArgument(i)));
+ }
+ m_codeOriginForExitTarget = CodeOrigin(0);
+ m_codeOriginForExitProfile = CodeOrigin(0);
+ m_node = nullptr;
+ for (unsigned i = codeBlock()->numParameters(); i--;) {
+ Node* node = m_graph.m_arguments[i];
+ VirtualRegister operand = virtualRegisterForArgument(i);
+
+ LValue jsValue = m_out.load64(addressFor(operand));
+
+ if (node) {
+ DFG_ASSERT(m_graph, node, operand == node->stackAccessData()->machineLocal);
+
+ // This is a hack, but it's an effective one. It allows us to do CSE on the
+ // primordial load of arguments. This assumes that the GetLocal that got put in
+ // place of the original SetArgument doesn't have any effects before it. This
+ // should hold true.
+ m_loadedArgumentValues.add(node, jsValue);
+ }
+
+ switch (m_graph.m_argumentFormats[i]) {
+ case FlushedInt32:
+ speculate(BadType, jsValueValue(jsValue), node, isNotInt32(jsValue));
+ break;
+ case FlushedBoolean:
+ speculate(BadType, jsValueValue(jsValue), node, isNotBoolean(jsValue));
+ break;
+ case FlushedCell:
+ speculate(BadType, jsValueValue(jsValue), node, isNotCell(jsValue));
+ break;
+ case FlushedJSValue:
+ break;
+ default:
+ DFG_CRASH(m_graph, node, "Bad flush format for argument");
+ break;
+ }
}
+ m_out.jump(lowBlock(m_graph.block(0)));
+
+ for (BasicBlock* block : preOrder)
+ compileBlock(block);
if (Options::dumpLLVMIR())
dumpModule(m_ftlState.module);
m_ftlState.dumpState("after lowering");
if (validationEnabled())
verifyModule(m_ftlState.module);
-
- return m_loweringSucceeded;
}
private:
type = m_out.int64;
break;
default:
- LOWERING_FAILED(node, "Bad Phi node result type");
- return;
+ DFG_CRASH(m_graph, node, "Bad Phi node result type");
+ break;
}
m_phis.add(node, buildAlloca(m_out.m_builder, type));
}
m_out.appendTo(lowBlock, m_nextLowBlock);
if (Options::ftlCrashes())
- m_out.crashNonTerminal();
+ m_out.trap();
if (!m_highBlock->cfaHasVisited) {
- m_out.crash();
+ if (verboseCompilationEnabled())
+ dataLog("Bailing because CFA didn't reach.\n");
+ crash(m_highBlock->index, UINT_MAX);
return;
}
- initializeOSRExitStateForBlock();
+ m_availabilityCalculator.beginBlock(m_highBlock);
m_state.reset();
m_state.beginBasicBlock(m_highBlock);
break;
}
}
-
+
+ void safelyInvalidateAfterTermination()
+ {
+ if (verboseCompilationEnabled())
+ dataLog("Bailing.\n");
+ crash();
+
+ // Invalidate dominated blocks. Under normal circumstances we would expect
+ // them to be invalidated already. But you can have the CFA become more
+ // precise over time because the structures of objects change on the main
+ // thread. Failing to do this would result in weird crashes due to a value
+ // being used but not defined. Race conditions FTW!
+ for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
+ BasicBlock* target = m_graph.block(blockIndex);
+ if (!target)
+ continue;
+ if (m_graph.m_dominators.dominates(m_highBlock, target)) {
+ if (verboseCompilationEnabled())
+ dataLog("Block ", *target, " will bail also.\n");
+ target->cfaHasVisited = false;
+ }
+ }
+ }
+
bool compileNode(unsigned nodeIndex)
{
if (!m_state.isValid()) {
- m_out.unreachable();
+ safelyInvalidateAfterTermination();
return false;
}
m_availableRecoveries.resize(0);
- bool shouldExecuteEffects = m_interpreter.startExecuting(m_node);
+ m_interpreter.startExecuting();
switch (m_node->op()) {
case Upsilon:
case Int52Constant:
compileInt52Constant();
break;
- case WeakJSConstant:
- compileWeakJSConstant();
- break;
- case PhantomArguments:
- compilePhantomArguments();
- break;
case DoubleRep:
compileDoubleRep();
break;
+ case DoubleAsInt32:
+ compileDoubleAsInt32();
+ break;
case ValueRep:
compileValueRep();
break;
case BooleanToNumber:
compileBooleanToNumber();
break;
- case GetArgument:
- compileGetArgument();
- break;
case ExtractOSREntryLocal:
compileExtractOSREntryLocal();
break;
- case GetLocal:
- compileGetLocal();
- break;
- case SetLocal:
- compileSetLocal();
- break;
- case MovHint:
- compileMovHint();
- break;
- case GetMyArgumentsLength:
- compileGetMyArgumentsLength();
- break;
- case GetMyArgumentByVal:
- compileGetMyArgumentByVal();
+ case GetStack:
+ compileGetStack();
break;
- case ZombieHint:
- compileZombieHint();
+ case PutStack:
+ compilePutStack();
break;
- case Phantom:
- case HardPhantom:
- compilePhantom();
+ case Check:
+ compileNoOp();
break;
case ToThis:
compileToThis();
case ArithSub:
compileArithAddOrSub();
break;
+ case ArithClz32:
+ compileArithClz32();
+ break;
case ArithMul:
compileArithMul();
break;
case ArithCos:
compileArithCos();
break;
+ case ArithPow:
+ compileArithPow();
+ break;
+ case ArithRound:
+ compileArithRound();
+ break;
case ArithSqrt:
compileArithSqrt();
break;
+ case ArithLog:
+ compileArithLog();
+ break;
case ArithFRound:
compileArithFRound();
break;
case CheckStructure:
compileCheckStructure();
break;
- case StructureTransitionWatchpoint:
- compileStructureTransitionWatchpoint();
+ case CheckCell:
+ compileCheckCell();
+ break;
+ case CheckNotEmpty:
+ compileCheckNotEmpty();
break;
- case CheckFunction:
- compileCheckFunction();
+ case CheckBadCell:
+ compileCheckBadCell();
break;
- case CheckExecutable:
- compileCheckExecutable();
+ case GetExecutable:
+ compileGetExecutable();
break;
case ArrayifyToStructure:
compileArrayifyToStructure();
case PutStructure:
compilePutStructure();
break;
- case PhantomPutStructure:
- compilePhantomPutStructure();
- break;
case GetById:
compileGetById();
break;
- case PutByIdDirect:
+ case In:
+ compileIn();
+ break;
case PutById:
+ case PutByIdDirect:
compilePutById();
break;
case GetButterfly:
case GetByVal:
compileGetByVal();
break;
+ case GetMyArgumentByVal:
+ compileGetMyArgumentByVal();
+ break;
case PutByVal:
case PutByValAlias:
case PutByValDirect:
case ArrayPop:
compileArrayPop();
break;
+ case CreateActivation:
+ compileCreateActivation();
+ break;
+ case NewFunction:
+ compileNewFunction();
+ break;
+ case CreateDirectArguments:
+ compileCreateDirectArguments();
+ break;
+ case CreateScopedArguments:
+ compileCreateScopedArguments();
+ break;
+ case CreateClonedArguments:
+ compileCreateClonedArguments();
+ break;
case NewObject:
compileNewObject();
break;
compileReallocatePropertyStorage();
break;
case ToString:
- compileToString();
+ case CallStringConstructor:
+ compileToStringOrCallStringConstructor();
break;
case ToPrimitive:
compileToPrimitive();
compileStringCharCodeAt();
break;
case GetByOffset:
+ case GetGetterSetterByOffset:
compileGetByOffset();
break;
+ case GetGetter:
+ compileGetGetter();
+ break;
+ case GetSetter:
+ compileGetSetter();
+ break;
case MultiGetByOffset:
compileMultiGetByOffset();
break;
case GetCallee:
compileGetCallee();
break;
+ case GetArgumentCount:
+ compileGetArgumentCount();
+ break;
case GetScope:
compileGetScope();
break;
- case GetMyScope:
- compileGetMyScope();
- break;
case SkipScope:
compileSkipScope();
break;
- case GetClosureRegisters:
- compileGetClosureRegisters();
- break;
case GetClosureVar:
compileGetClosureVar();
break;
case PutClosureVar:
compilePutClosureVar();
break;
+ case GetFromArguments:
+ compileGetFromArguments();
+ break;
+ case PutToArguments:
+ compilePutToArguments();
+ break;
case CompareEq:
compileCompareEq();
break;
case Construct:
compileCallOrConstruct();
break;
+ case CallVarargs:
+ case CallForwardVarargs:
+ case ConstructVarargs:
+ case ConstructForwardVarargs:
+ compileCallOrConstructVarargs();
+ break;
+ case LoadVarargs:
+ compileLoadVarargs();
+ break;
+ case ForwardVarargs:
+ compileForwardVarargs();
+ break;
+#if ENABLE(FTL_NATIVE_CALL_INLINING)
+ case NativeCall:
+ case NativeConstruct:
+ compileNativeCallOrConstruct();
+ break;
+#endif
case Jump:
compileJump();
break;
case InvalidationPoint:
compileInvalidationPoint();
break;
- case CheckArgumentsNotCreated:
- compileCheckArgumentsNotCreated();
- break;
case IsUndefined:
compileIsUndefined();
break;
case IsObject:
compileIsObject();
break;
+ case IsObjectOrNull:
+ compileIsObjectOrNull();
+ break;
case IsFunction:
compileIsFunction();
break;
+ case TypeOf:
+ compileTypeOf();
+ break;
case CheckHasInstance:
compileCheckHasInstance();
break;
case StoreBarrier:
compileStoreBarrier();
break;
- case StoreBarrierWithNullCheck:
- compileStoreBarrierWithNullCheck();
+ case HasIndexedProperty:
+ compileHasIndexedProperty();
+ break;
+ case HasGenericProperty:
+ compileHasGenericProperty();
+ break;
+ case HasStructureProperty:
+ compileHasStructureProperty();
+ break;
+ case GetDirectPname:
+ compileGetDirectPname();
break;
+ case GetEnumerableLength:
+ compileGetEnumerableLength();
+ break;
+ case GetPropertyEnumerator:
+ compileGetPropertyEnumerator();
+ break;
+ case GetEnumeratorStructurePname:
+ compileGetEnumeratorStructurePname();
+ break;
+ case GetEnumeratorGenericPname:
+ compileGetEnumeratorGenericPname();
+ break;
+ case ToIndexString:
+ compileToIndexString();
+ break;
+ case CheckStructureImmediate:
+ compileCheckStructureImmediate();
+ break;
+ case MaterializeNewObject:
+ compileMaterializeNewObject();
+ break;
+ case MaterializeCreateActivation:
+ compileMaterializeCreateActivation();
+ break;
+
case PhantomLocal:
- case SetArgument:
case LoopHint:
- case VariableWatchpoint:
- case FunctionReentryWatchpoint:
- case TypedArrayWatchpoint:
- case AllocationProfileWatchpoint:
+ case MovHint:
+ case ZombieHint:
+ case PhantomNewObject:
+ case PhantomNewFunction:
+ case PhantomCreateActivation:
+ case PhantomDirectArguments:
+ case PhantomClonedArguments:
+ case PutHint:
+ case BottomValue:
+ case KillStack:
break;
default:
- LOWERING_FAILED(m_node, "Unrecognized node in FTL backend");
+ DFG_CRASH(m_graph, m_node, "Unrecognized node in FTL backend");
break;
}
- if (!m_loweringSucceeded)
+ if (m_node->isTerminal())
+ return false;
+
+ if (!m_state.isValid()) {
+ safelyInvalidateAfterTermination();
return false;
+ }
- if (shouldExecuteEffects)
- m_interpreter.executeEffects(nodeIndex);
+ m_availabilityCalculator.executeNode(m_node);
+ m_interpreter.executeEffects(nodeIndex);
return true;
}
m_out.set(lowJSValue(m_node->child1()), destination);
break;
default:
- LOWERING_FAILED(m_node, "Bad use kind");
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
break;
}
}
setJSValue(m_out.get(source));
break;
default:
- LOWERING_FAILED(m_node, "Bad use kind");
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
break;
}
}
void compileDoubleConstant()
{
- setDouble(m_out.constDouble(m_graph.valueOfNumberConstant(m_node)));
+ setDouble(m_out.constDouble(m_node->asNumber()));
}
void compileInt52Constant()
{
- int64_t value = m_graph.valueOfJSConstant(m_node).asMachineInt();
+ int64_t value = m_node->asMachineInt();
setInt52(m_out.constInt64(value << JSValue::int52ShiftAmount));
setStrictInt52(m_out.constInt64(value));
}
- void compileWeakJSConstant()
- {
- setJSValue(weakPointer(m_node->weakConstant()));
- }
-
- void compilePhantomArguments()
- {
- setJSValue(m_out.constInt64(JSValue::encode(JSValue())));
- }
-
void compileDoubleRep()
{
switch (m_node->child1().useKind()) {
+ case RealNumberUse: {
+ LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);
+
+ LValue doubleValue = unboxDouble(value);
+
+ LBasicBlock intCase = FTL_NEW_BLOCK(m_out, ("DoubleRep RealNumberUse int case"));
+ LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("DoubleRep continuation"));
+
+ ValueFromBlock fastResult = m_out.anchor(doubleValue);
+ m_out.branch(
+ m_out.doubleEqual(doubleValue, doubleValue),
+ usually(continuation), rarely(intCase));
+
+ LBasicBlock lastNext = m_out.appendTo(intCase, continuation);
+
+ FTL_TYPE_CHECK(
+ jsValueValue(value), m_node->child1(), SpecBytecodeRealNumber,
+ isNotInt32(value, provenType(m_node->child1()) & ~SpecFullDouble));
+ ValueFromBlock slowResult = m_out.anchor(m_out.intToDouble(unboxInt32(value)));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+
+ setDouble(m_out.phi(m_out.doubleType, fastResult, slowResult));
+ return;
+ }
+
+ case NotCellUse:
case NumberUse: {
+ bool shouldConvertNonNumber = m_node->child1().useKind() == NotCellUse;
+
LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);
- setDouble(jsValueToDouble(m_node->child1(), value));
+
+ LBasicBlock intCase = FTL_NEW_BLOCK(m_out, ("jsValueToDouble unboxing int case"));
+ LBasicBlock doubleTesting = FTL_NEW_BLOCK(m_out, ("jsValueToDouble testing double case"));
+ LBasicBlock doubleCase = FTL_NEW_BLOCK(m_out, ("jsValueToDouble unboxing double case"));
+ LBasicBlock nonDoubleCase = FTL_NEW_BLOCK(m_out, ("jsValueToDouble testing undefined case"));
+ LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("jsValueToDouble unboxing continuation"));
+
+ m_out.branch(
+ isNotInt32(value, provenType(m_node->child1())),
+ unsure(doubleTesting), unsure(intCase));
+
+ LBasicBlock lastNext = m_out.appendTo(intCase, doubleTesting);
+
+ ValueFromBlock intToDouble = m_out.anchor(
+ m_out.intToDouble(unboxInt32(value)));
+ m_out.jump(continuation);
+
+ m_out.appendTo(doubleTesting, doubleCase);
+ LValue valueIsNumber = isNumber(value, provenType(m_node->child1()));
+ m_out.branch(valueIsNumber, usually(doubleCase), rarely(nonDoubleCase));
+
+ m_out.appendTo(doubleCase, nonDoubleCase);
+ ValueFromBlock unboxedDouble = m_out.anchor(unboxDouble(value));
+ m_out.jump(continuation);
+
+ if (shouldConvertNonNumber) {
+ LBasicBlock undefinedCase = FTL_NEW_BLOCK(m_out, ("jsValueToDouble converting undefined case"));
+ LBasicBlock testNullCase = FTL_NEW_BLOCK(m_out, ("jsValueToDouble testing null case"));
+ LBasicBlock nullCase = FTL_NEW_BLOCK(m_out, ("jsValueToDouble converting null case"));
+ LBasicBlock testBooleanTrueCase = FTL_NEW_BLOCK(m_out, ("jsValueToDouble testing boolean true case"));
+ LBasicBlock convertBooleanTrueCase = FTL_NEW_BLOCK(m_out, ("jsValueToDouble convert boolean true case"));
+ LBasicBlock convertBooleanFalseCase = FTL_NEW_BLOCK(m_out, ("jsValueToDouble convert boolean false case"));
+
+ m_out.appendTo(nonDoubleCase, undefinedCase);
+ LValue valueIsUndefined = m_out.equal(value, m_out.constInt64(ValueUndefined));
+ m_out.branch(valueIsUndefined, unsure(undefinedCase), unsure(testNullCase));
+
+ m_out.appendTo(undefinedCase, testNullCase);
+ ValueFromBlock convertedUndefined = m_out.anchor(m_out.constDouble(PNaN));
+ m_out.jump(continuation);
+
+ m_out.appendTo(testNullCase, nullCase);
+ LValue valueIsNull = m_out.equal(value, m_out.constInt64(ValueNull));
+ m_out.branch(valueIsNull, unsure(nullCase), unsure(testBooleanTrueCase));
+
+ m_out.appendTo(nullCase, testBooleanTrueCase);
+ ValueFromBlock convertedNull = m_out.anchor(m_out.constDouble(0));
+ m_out.jump(continuation);
+
+ m_out.appendTo(testBooleanTrueCase, convertBooleanTrueCase);
+ LValue valueIsBooleanTrue = m_out.equal(value, m_out.constInt64(ValueTrue));
+ m_out.branch(valueIsBooleanTrue, unsure(convertBooleanTrueCase), unsure(convertBooleanFalseCase));
+
+ m_out.appendTo(convertBooleanTrueCase, convertBooleanFalseCase);
+ ValueFromBlock convertedTrue = m_out.anchor(m_out.constDouble(1));
+ m_out.jump(continuation);
+
+ m_out.appendTo(convertBooleanFalseCase, continuation);
+
+ LValue valueIsNotBooleanFalse = m_out.notEqual(value, m_out.constInt64(ValueFalse));
+ FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), ~SpecCell, valueIsNotBooleanFalse);
+ ValueFromBlock convertedFalse = m_out.anchor(m_out.constDouble(0));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setDouble(m_out.phi(m_out.doubleType, intToDouble, unboxedDouble, convertedUndefined, convertedNull, convertedTrue, convertedFalse));
+ return;
+ }
+ m_out.appendTo(nonDoubleCase, continuation);
+ FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), SpecBytecodeNumber, m_out.booleanTrue);
+ m_out.unreachable();
+
+ m_out.appendTo(continuation, lastNext);
+
+ setDouble(m_out.phi(m_out.doubleType, intToDouble, unboxedDouble));
return;
}
}
default:
- LOWERING_FAILED(m_node, "Bad use kind");
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
}
}
-
+
+ void compileDoubleAsInt32()
+ {
+ LValue integerValue = convertDoubleToInt32(lowDouble(m_node->child1()), shouldCheckNegativeZero(m_node->arithMode()));
+ setInt32(integerValue);
+ }
+
void compileValueRep()
{
switch (m_node->child1().useKind()) {
}
default:
- LOWERING_FAILED(m_node, "Bad use kind");
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
}
}
return;
default:
- LOWERING_FAILED(m_node, "Bad use kind");
+ RELEASE_ASSERT_NOT_REACHED();
}
}
}
default:
- LOWERING_FAILED(m_node, "Bad use kind");
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
break;
}
}
case UntypedUse: {
LValue value = lowJSValue(m_node->child1());
+ if (!m_interpreter.needsTypeCheck(m_node->child1(), SpecBoolInt32 | SpecBoolean)) {
+ setInt32(m_out.bitAnd(m_out.castToInt32(value), m_out.int32One));
+ return;
+ }
+
LBasicBlock booleanCase = FTL_NEW_BLOCK(m_out, ("BooleanToNumber boolean case"));
LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("BooleanToNumber continuation"));
ValueFromBlock notBooleanResult = m_out.anchor(value);
- m_out.branch(isBoolean(value), unsure(booleanCase), unsure(continuation));
+ m_out.branch(
+ isBoolean(value, provenType(m_node->child1())),
+ unsure(booleanCase), unsure(continuation));
LBasicBlock lastNext = m_out.appendTo(booleanCase, continuation);
ValueFromBlock booleanResult = m_out.anchor(m_out.bitOr(
}
default:
- LOWERING_FAILED(m_node, "Bad flush format");
+ RELEASE_ASSERT_NOT_REACHED();
return;
}
}
- void compileGetArgument()
- {
- VariableAccessData* variable = m_node->variableAccessData();
- VirtualRegister operand = variable->machineLocal();
- RELEASE_ASSERT(operand.isArgument());
-
- LValue jsValue = m_out.load64(addressFor(operand));
-
- switch (useKindFor(variable->flushFormat())) {
- case Int32Use:
- speculate(BadType, jsValueValue(jsValue), m_node, isNotInt32(jsValue));
- setInt32(unboxInt32(jsValue));
- break;
- case CellUse:
- speculate(BadType, jsValueValue(jsValue), m_node, isNotCell(jsValue));
- setJSValue(jsValue);
- break;
- case BooleanUse:
- speculate(BadType, jsValueValue(jsValue), m_node, isNotBoolean(jsValue));
- setBoolean(unboxBoolean(jsValue));
- break;
- case UntypedUse:
- setJSValue(jsValue);
- break;
- default:
- LOWERING_FAILED(m_node, "Bad use kind");
- break;
- }
- }
-
void compileExtractOSREntryLocal()
{
EncodedJSValue* buffer = static_cast<EncodedJSValue*>(
setJSValue(m_out.load64(m_out.absolute(buffer + m_node->unlinkedLocal().toLocal())));
}
- void compileGetLocal()
+ void compileGetStack()
{
- // GetLocals arise only for captured variables.
+ // GetLocals arise only for captured variables and arguments. For arguments, we might have
+ // already loaded the value.
+ if (LValue value = m_loadedArgumentValues.get(m_node)) {
+ setJSValue(value);
+ return;
+ }
- VariableAccessData* variable = m_node->variableAccessData();
- AbstractValue& value = m_state.variables().operand(variable->local());
+ StackAccessData* data = m_node->stackAccessData();
+ AbstractValue& value = m_state.variables().operand(data->local);
- RELEASE_ASSERT(variable->isCaptured());
+ DFG_ASSERT(m_graph, m_node, isConcrete(data->format));
+ DFG_ASSERT(m_graph, m_node, data->format != FlushedDouble); // This just happens to not arise for GetStacks, right now. It would be trivial to support.
if (isInt32Speculation(value.m_type))
- setInt32(m_out.load32(payloadFor(variable->machineLocal())));
+ setInt32(m_out.load32(payloadFor(data->machineLocal)));
else
- setJSValue(m_out.load64(addressFor(variable->machineLocal())));
+ setJSValue(m_out.load64(addressFor(data->machineLocal)));
}
- void compileSetLocal()
+ void compilePutStack()
{
- VariableAccessData* variable = m_node->variableAccessData();
- switch (variable->flushFormat()) {
- case FlushedJSValue:
- case FlushedArguments: {
+ StackAccessData* data = m_node->stackAccessData();
+ switch (data->format) {
+ case FlushedJSValue: {
LValue value = lowJSValue(m_node->child1());
- m_out.store64(value, addressFor(variable->machineLocal()));
+ m_out.store64(value, addressFor(data->machineLocal));
break;
}
case FlushedDouble: {
LValue value = lowDouble(m_node->child1());
- m_out.storeDouble(value, addressFor(variable->machineLocal()));
+ m_out.storeDouble(value, addressFor(data->machineLocal));
break;
}
case FlushedInt32: {
LValue value = lowInt32(m_node->child1());
- m_out.store32(value, payloadFor(variable->machineLocal()));
+ m_out.store32(value, payloadFor(data->machineLocal));
break;
}
case FlushedInt52: {
LValue value = lowInt52(m_node->child1());
- m_out.store64(value, addressFor(variable->machineLocal()));
+ m_out.store64(value, addressFor(data->machineLocal));
break;
}
case FlushedCell: {
LValue value = lowCell(m_node->child1());
- m_out.store64(value, addressFor(variable->machineLocal()));
+ m_out.store64(value, addressFor(data->machineLocal));
break;
}
speculateBoolean(m_node->child1());
m_out.store64(
lowJSValue(m_node->child1(), ManualOperandSpeculation),
- addressFor(variable->machineLocal()));
+ addressFor(data->machineLocal));
break;
}
default:
- LOWERING_FAILED(m_node, "Bad flush format for argument");
- return;
+ DFG_CRASH(m_graph, m_node, "Bad flush format");
+ break;
}
-
- m_availability.operand(variable->local()) = Availability(variable->flushedAt());
- }
-
- void compileMovHint()
- {
- ASSERT(m_node->containsMovHint());
- ASSERT(m_node->op() != ZombieHint);
-
- VirtualRegister operand = m_node->unlinkedLocal();
- m_availability.operand(operand) = Availability(m_node->child1().node());
- }
-
- void compileZombieHint()
- {
- m_availability.operand(m_node->unlinkedLocal()) = Availability::unavailable();
}
- void compilePhantom()
+ void compileNoOp()
{
DFG_NODE_DO_TO_CHILDREN(m_graph, m_node, speculate);
}
LBasicBlock slowCase = FTL_NEW_BLOCK(m_out, ("ToThis slow case"));
LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("ToThis continuation"));
- m_out.branch(isCell(value), usually(isCellCase), rarely(slowCase));
+ m_out.branch(
+ isCell(value, provenType(m_node->child1())), usually(isCellCase), rarely(slowCase));
LBasicBlock lastNext = m_out.appendTo(isCellCase, slowCase);
ValueFromBlock fastResult = m_out.anchor(value);
void compileValueAdd()
{
J_JITOperation_EJJ operation;
- if (!(m_state.forNode(m_node->child1()).m_type & SpecFullNumber)
- && !(m_state.forNode(m_node->child2()).m_type & SpecFullNumber))
+ if (!(provenType(m_node->child1()) & SpecFullNumber)
+ && !(provenType(m_node->child2()) & SpecFullNumber))
operation = operationValueAddNotNumber;
else
operation = operationValueAdd;
}
case Int52RepUse: {
- if (!m_state.forNode(m_node->child1()).couldBeType(SpecInt52)
- && !m_state.forNode(m_node->child2()).couldBeType(SpecInt52)) {
+ if (!abstractValue(m_node->child1()).couldBeType(SpecInt52)
+ && !abstractValue(m_node->child2()).couldBeType(SpecInt52)) {
Int52Kind kind;
LValue left = lowWhicheverInt52(m_node->child1(), kind);
LValue right = lowInt52(m_node->child2(), kind);
}
default:
- LOWERING_FAILED(m_node, "Bad use kind");
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
break;
}
}
+
+ void compileArithClz32()
+ {
+ LValue operand = lowInt32(m_node->child1());
+ LValue isZeroUndef = m_out.booleanFalse;
+ setInt32(m_out.ctlz32(operand, isZeroUndef));
+ }
void compileArithMul()
{
}
default:
- LOWERING_FAILED(m_node, "Bad use kind");
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
break;
}
}
}
default:
- LOWERING_FAILED(m_node, "Bad use kind");
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
break;
}
}
}
default:
- LOWERING_FAILED(m_node, "Bad use kind");
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
break;
}
}
}
default:
- LOWERING_FAILED(m_node, "Bad use kind");
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
break;
}
}
}
default:
- LOWERING_FAILED(m_node, "Bad use kind");
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
break;
}
}
void compileArithCos() { setDouble(m_out.doubleCos(lowDouble(m_node->child1()))); }
- void compileArithSqrt() { setDouble(m_out.doubleSqrt(lowDouble(m_node->child1()))); }
-
- void compileArithFRound()
- {
- LValue floatValue = m_out.fpCast(lowDouble(m_node->child1()), m_out.floatType);
- setDouble(m_out.fpCast(floatValue, m_out.doubleType));
- }
-
- void compileArithNegate()
+ // Lowers ArithPow. When the exponent is statically Int32 we emit powi
+ // directly. Otherwise we inline: (1) a powi fast path for exponents that
+ // are small integers in double form, (2) the pow() special cases that
+ // must produce NaN (NaN exponent; |base| == 1 with infinite exponent),
+ // and (3) a regular pow() call for everything else.
+ void compileArithPow()
{
- switch (m_node->child1().useKind()) {
- case Int32Use: {
- LValue value = lowInt32(m_node->child1());
-
- LValue result;
- if (!shouldCheckOverflow(m_node->arithMode()))
- result = m_out.neg(value);
+ // FIXME: investigate llvm.powi to better understand its performance characteristics.
+ // It might be better to have the inline loop in DFG too.
+ if (m_node->child2().useKind() == Int32Use)
+ setDouble(m_out.doublePowi(lowDouble(m_node->child1()), lowInt32(m_node->child2())));
+ else {
+ LValue base = lowDouble(m_node->child1());
+ LValue exponent = lowDouble(m_node->child2());
+
+ LBasicBlock integerExponentIsSmallBlock = FTL_NEW_BLOCK(m_out, ("ArithPow test integer exponent is small."))
+ LBasicBlock integerExponentPowBlock = FTL_NEW_BLOCK(m_out, ("ArithPow pow(double, (int)double)."));
+ LBasicBlock doubleExponentPowBlockEntry = FTL_NEW_BLOCK(m_out, ("ArithPow pow(double, double)."));
+ LBasicBlock nanExceptionExponentIsInfinity = FTL_NEW_BLOCK(m_out, ("ArithPow NaN Exception, check exponent is infinity."));
+ LBasicBlock nanExceptionBaseIsOne = FTL_NEW_BLOCK(m_out, ("ArithPow NaN Exception, check base is one."));
+ LBasicBlock powBlock = FTL_NEW_BLOCK(m_out, ("ArithPow regular pow"));
+ LBasicBlock nanExceptionResultIsNaN = FTL_NEW_BLOCK(m_out, ("ArithPow NaN Exception, result is NaN."));
+ LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("ArithPow continuation"));
+
+ // The exponent qualifies for the powi fast path if it round-trips
+ // exactly through int32.
+ LValue integerExponent = m_out.fpToInt32(exponent);
+ LValue integerExponentConvertedToDouble = m_out.intToDouble(integerExponent);
+ LValue exponentIsInteger = m_out.doubleEqual(exponent, integerExponentConvertedToDouble);
+ m_out.branch(exponentIsInteger, unsure(integerExponentIsSmallBlock), unsure(doubleExponentPowBlockEntry));
+
+ // Unsigned below-1000 check: also rejects negative integer exponents,
+ // which therefore take the generic pow path.
+ LBasicBlock lastNext = m_out.appendTo(integerExponentIsSmallBlock, integerExponentPowBlock);
+ LValue integerExponentBelow1000 = m_out.below(integerExponent, m_out.constInt32(1000));
+ m_out.branch(integerExponentBelow1000, usually(integerExponentPowBlock), rarely(doubleExponentPowBlockEntry));
+
+ m_out.appendTo(integerExponentPowBlock, doubleExponentPowBlockEntry);
+ ValueFromBlock powDoubleIntResult = m_out.anchor(m_out.doublePowi(base, integerExponent));
+ m_out.jump(continuation);
+
+ // If y is NaN, the result is NaN.
+ m_out.appendTo(doubleExponentPowBlockEntry, nanExceptionExponentIsInfinity);
+ LValue exponentIsNaN;
+ if (provenType(m_node->child2()) & SpecDoubleNaN)
+ exponentIsNaN = m_out.doubleNotEqualOrUnordered(exponent, exponent);
+ else
+ exponentIsNaN = m_out.booleanFalse;
+ m_out.branch(exponentIsNaN, rarely(nanExceptionResultIsNaN), usually(nanExceptionExponentIsInfinity));
+
+ // If abs(x) is 1 and y is +infinity, the result is NaN.
+ // If abs(x) is 1 and y is -infinity, the result is NaN.
+ m_out.appendTo(nanExceptionExponentIsInfinity, nanExceptionBaseIsOne);
+ LValue absoluteExponent = m_out.doubleAbs(exponent);
+ LValue absoluteExponentIsInfinity = m_out.doubleEqual(absoluteExponent, m_out.constDouble(std::numeric_limits<double>::infinity()));
+ m_out.branch(absoluteExponentIsInfinity, rarely(nanExceptionBaseIsOne), usually(powBlock));
+
+ m_out.appendTo(nanExceptionBaseIsOne, powBlock);
+ LValue absoluteBase = m_out.doubleAbs(base);
+ LValue absoluteBaseIsOne = m_out.doubleEqual(absoluteBase, m_out.constDouble(1));
+ m_out.branch(absoluteBaseIsOne, unsure(nanExceptionResultIsNaN), unsure(powBlock));
+
+ m_out.appendTo(powBlock, nanExceptionResultIsNaN);
+ ValueFromBlock powResult = m_out.anchor(m_out.doublePow(base, exponent));
+ m_out.jump(continuation);
+
+ m_out.appendTo(nanExceptionResultIsNaN, continuation);
+ ValueFromBlock pureNan = m_out.anchor(m_out.constDouble(PNaN));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setDouble(m_out.phi(m_out.doubleType, powDoubleIntResult, powResult, pureNan));
+ }
+ }
+
+ // Lowers ArithRound (Math.round). Strategy: take ceil(value), then if the
+ // overshoot (ceil(value) - value) is strictly greater than 0.5, subtract 1
+ // to round down instead — i.e. halves round up. The unordered comparison
+ // routes a NaN input through the round-down path, where the arithmetic
+ // still yields NaN. If the node's rounding mode wants an integer result we
+ // convert to int32 (with an optional negative-zero check); otherwise we
+ // produce the double.
+ void compileArithRound()
+ {
+ LBasicBlock realPartIsMoreThanHalf = FTL_NEW_BLOCK(m_out, ("ArithRound should round down"));
+ LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("ArithRound continuation"));
+
+ LValue value = lowDouble(m_node->child1());
+ LValue integerValue = m_out.ceil64(value);
+ ValueFromBlock integerValueResult = m_out.anchor(integerValue);
+
+ LValue realPart = m_out.doubleSub(integerValue, value);
+
+ m_out.branch(m_out.doubleGreaterThanOrUnordered(realPart, m_out.constDouble(0.5)), unsure(realPartIsMoreThanHalf), unsure(continuation));
+
+ LBasicBlock lastNext = m_out.appendTo(realPartIsMoreThanHalf, continuation);
+ LValue integerValueRoundedDown = m_out.doubleSub(integerValue, m_out.constDouble(1));
+ ValueFromBlock integerValueRoundedDownResult = m_out.anchor(integerValueRoundedDown);
+ m_out.jump(continuation);
+ m_out.appendTo(continuation, lastNext);
+
+ LValue result = m_out.phi(m_out.doubleType, integerValueResult, integerValueRoundedDownResult);
+
+ if (producesInteger(m_node->arithRoundingMode())) {
+ LValue integerValue = convertDoubleToInt32(result, shouldCheckNegativeZero(m_node->arithRoundingMode()));
+ setInt32(integerValue);
+ } else
+ setDouble(result);
+ }
+
+ // Lowers ArithSqrt: square root of the double-typed child.
+ void compileArithSqrt() { setDouble(m_out.doubleSqrt(lowDouble(m_node->child1()))); }
+
+ // Lowers ArithLog: natural logarithm of the double-typed child.
+ void compileArithLog() { setDouble(m_out.doubleLog(lowDouble(m_node->child1()))); }
+
+ // Lowers ArithFRound (Math.fround): round-trips the double through float
+ // so the result carries single-precision rounding while staying a double.
+ void compileArithFRound()
+ {
+ LValue floatValue = m_out.fpCast(lowDouble(m_node->child1()), m_out.floatType);
+ setDouble(m_out.fpCast(floatValue, m_out.doubleType));
+ }
+
+ void compileArithNegate()
+ {
+ switch (m_node->child1().useKind()) {
+ case Int32Use: {
+ LValue value = lowInt32(m_node->child1());
+
+ LValue result;
+ if (!shouldCheckOverflow(m_node->arithMode()))
+ result = m_out.neg(value);
else if (!shouldCheckNegativeZero(m_node->arithMode())) {
// We don't have a negate-with-overflow intrinsic. Hopefully this
// does the trick, though.
}
case Int52RepUse: {
- if (!m_state.forNode(m_node->child1()).couldBeType(SpecInt52)) {
+ if (!abstractValue(m_node->child1()).couldBeType(SpecInt52)) {
Int52Kind kind;
LValue value = lowWhicheverInt52(m_node->child1(), kind);
LValue result = m_out.neg(value);
}
default:
- LOWERING_FAILED(m_node, "Bad use kind");
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
break;
}
}
LValue cell = lowCell(m_node->child1());
ExitKind exitKind;
- if (m_node->child1()->op() == WeakJSConstant)
- exitKind = BadWeakConstantCache;
+ if (m_node->child1()->hasConstant())
+ exitKind = BadConstantCache;
else
exitKind = BadCache;
LValue structureID = m_out.load32(cell, m_heaps.JSCell_structureID);
- if (m_node->structureSet().size() == 1) {
- speculate(
- exitKind, jsValueValue(cell), 0,
- m_out.notEqual(structureID, weakStructure(m_node->structureSet()[0])));
- return;
- }
-
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("CheckStructure continuation"));
-
- LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
- for (unsigned i = 0; i < m_node->structureSet().size() - 1; ++i) {
- LBasicBlock nextStructure = FTL_NEW_BLOCK(m_out, ("CheckStructure nextStructure"));
- m_out.branch(
- m_out.equal(structureID, weakStructure(m_node->structureSet()[i])),
- unsure(continuation), unsure(nextStructure));
- m_out.appendTo(nextStructure);
- }
-
- speculate(
- exitKind, jsValueValue(cell), 0,
- m_out.notEqual(structureID, weakStructure(m_node->structureSet().last())));
-
- m_out.jump(continuation);
- m_out.appendTo(continuation, lastNext);
+ checkStructure(
+ structureID, jsValueValue(cell), exitKind, m_node->structureSet(),
+ [this] (Structure* structure) {
+ return weakStructureID(structure);
+ });
}
- void compileStructureTransitionWatchpoint()
- {
- addWeakReference(m_node->structure());
- speculateCell(m_node->child1());
- }
-
- void compileCheckFunction()
+ // CheckCell (formerly CheckFunction, generalized to any cell): OSR-exits
+ // with BadCell unless child1 is exactly the cell recorded in the node's
+ // cellOperand.
+ void compileCheckCell()
{
LValue cell = lowCell(m_node->child1());
speculate(
- BadFunction, jsValueValue(cell), m_node->child1().node(),
- m_out.notEqual(cell, weakPointer(m_node->function())));
+ BadCell, jsValueValue(cell), m_node->child1().node(),
+ m_out.notEqual(cell, weakPointer(m_node->cellOperand()->cell())));
}
- void compileCheckExecutable()
+ // CheckBadCell: the check is known at compile time to always fail, so
+ // unconditionally terminate the block with a BadCell OSR exit.
+ void compileCheckBadCell()
+ {
+ terminate(BadCell);
+ }
+
+ // CheckNotEmpty: OSR-exits with TDZFailure when the value is the empty
+ // JSValue (encoded as all-zero bits) — i.e. a temporal-dead-zone read.
+ void compileCheckNotEmpty()
+ {
+ speculate(TDZFailure, noValue(), nullptr, m_out.isZero64(lowJSValue(m_node->child1())));
+ }
+
+ // GetExecutable (replacing the old CheckExecutable speculation): speculate
+ // that child1 is a JSFunction, then load and return its executable pointer.
+ void compileGetExecutable()
{
LValue cell = lowCell(m_node->child1());
-
- speculate(
- BadExecutable, jsValueValue(cell), m_node->child1().node(),
- m_out.notEqual(
- m_out.loadPtr(cell, m_heaps.JSFunction_executable),
- weakPointer(m_node->executable())));
+ speculateFunction(m_node->child1(), cell);
+ setJSValue(m_out.loadPtr(cell, m_heaps.JSFunction_executable));
}
void compileArrayifyToStructure()
LValue structureID = m_out.load32(cell, m_heaps.JSCell_structureID);
m_out.branch(
- m_out.notEqual(structureID, weakStructure(m_node->structure())),
+ m_out.notEqual(structureID, weakStructureID(m_node->structure())),
rarely(unexpectedStructure), usually(continuation));
LBasicBlock lastNext = m_out.appendTo(unexpectedStructure, continuation);
vmCall(m_out.operation(operationEnsureDouble), m_callFrame, cell);
break;
case Array::Contiguous:
- if (m_node->arrayMode().conversion() == Array::RageConvert)
- vmCall(m_out.operation(operationRageEnsureContiguous), m_callFrame, cell);
- else
- vmCall(m_out.operation(operationEnsureContiguous), m_callFrame, cell);
+ vmCall(m_out.operation(operationEnsureContiguous), m_callFrame, cell);
break;
case Array::ArrayStorage:
case Array::SlowPutArrayStorage:
vmCall(m_out.operation(operationEnsureArrayStorage), m_callFrame, cell);
break;
default:
- LOWERING_FAILED(m_node, "Bad array type");
- return;
+ DFG_CRASH(m_graph, m_node, "Bad array type");
+ break;
}
structureID = m_out.load32(cell, m_heaps.JSCell_structureID);
speculate(
BadIndexingType, jsValueValue(cell), 0,
- m_out.notEqual(structureID, weakStructure(m_node->structure())));
+ m_out.notEqual(structureID, weakStructureID(m_node->structure())));
m_out.jump(continuation);
m_out.appendTo(continuation, lastNext);
{
m_ftlState.jitCode->common.notifyCompilingStructureTransition(m_graph.m_plan, codeBlock(), m_node);
- Structure* oldStructure = m_node->structureTransitionData().previousStructure;
- Structure* newStructure = m_node->structureTransitionData().newStructure;
+ Structure* oldStructure = m_node->transition()->previous;
+ Structure* newStructure = m_node->transition()->next;
ASSERT_UNUSED(oldStructure, oldStructure->indexingType() == newStructure->indexingType());
ASSERT(oldStructure->typeInfo().inlineTypeFlags() == newStructure->typeInfo().inlineTypeFlags());
ASSERT(oldStructure->typeInfo().type() == newStructure->typeInfo().type());
LValue cell = lowCell(m_node->child1());
m_out.store32(
- weakStructure(newStructure),
+ weakStructureID(newStructure),
cell, m_heaps.JSCell_structureID);
}
- void compilePhantomPutStructure()
- {
- m_ftlState.jitCode->common.notifyCompilingStructureTransition(m_graph.m_plan, codeBlock(), m_node);
- }
-
void compileGetById()
{
// Pretty much the only reason why we don't also support GetByIdFlush is because:
LBasicBlock notCellCase = FTL_NEW_BLOCK(m_out, ("GetById untyped not cell case"));
LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("GetById untyped continuation"));
- m_out.branch(isCell(value), unsure(cellCase), unsure(notCellCase));
+ m_out.branch(
+ isCell(value, provenType(m_node->child1())), unsure(cellCase), unsure(notCellCase));
LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
ValueFromBlock cellResult = m_out.anchor(getById(value));
m_out.appendTo(notCellCase, continuation);
ValueFromBlock notCellResult = m_out.anchor(vmCall(
- m_out.operation(operationGetById),
- m_callFrame, getUndef(m_out.intPtr), value,
+ m_out.operation(operationGetByIdGeneric),
+ m_callFrame, value,
m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
m_out.jump(continuation);
}
default:
- LOWERING_FAILED(m_node, "Bad use kind");
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
return;
}
}
LValue base = lowCell(m_node->child1());
LValue value = lowJSValue(m_node->child2());
- StringImpl* uid = m_graph.identifiers()[m_node->identifierNumber()];
+ auto uid = m_graph.identifiers()[m_node->identifierNumber()];
// Arguments: id, bytes, target, numArgs, args...
unsigned stackmapID = m_stackmapIDs++;
- if (Options::verboseCompilation())
+ if (verboseCompilationEnabled())
dataLog(" Emitting PutById patchpoint with stackmap #", stackmapID, "\n");
LValue call = m_out.call(
Edge edge = m_node->child1();
LValue cell = lowCell(edge);
- if (m_node->arrayMode().alreadyChecked(m_graph, m_node, m_state.forNode(edge)))
+ if (m_node->arrayMode().alreadyChecked(m_graph, m_node, abstractValue(edge)))
return;
speculate(
LBasicBlock wastefulCase = FTL_NEW_BLOCK(m_out, ("wasteful typed array"));
LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("continuation branch"));
- LValue baseAddress = m_out.addPtr(basePtr, JSArrayBufferView::offsetOfMode());
+ LValue mode = m_out.load32(basePtr, m_heaps.JSArrayBufferView_mode);
m_out.branch(
- m_out.notEqual(baseAddress , m_out.constIntPtr(WastefulTypedArray)),
+ m_out.notEqual(mode, m_out.constInt32(WastefulTypedArray)),
unsure(simpleCase), unsure(wastefulCase));
// begin simple case
LValue arrayBufferPtr = m_out.loadPtr(butterflyPtr, m_heaps.Butterfly_arrayBuffer);
LValue dataPtr = m_out.loadPtr(arrayBufferPtr, m_heaps.ArrayBuffer_data);
- ValueFromBlock wastefulOut = m_out.anchor(m_out.sub(dataPtr, vectorPtr));
+ ValueFromBlock wastefulOut = m_out.anchor(m_out.sub(vectorPtr, dataPtr));
m_out.jump(continuation);
m_out.appendTo(continuation, lastNext);
setInt32(m_out.castToInt32(m_out.phi(m_out.intPtr, simpleOut, wastefulOut)));
}
- void compileGetMyArgumentsLength()
- {
- checkArgumentsNotCreated();
-
- RELEASE_ASSERT(!m_node->origin.semantic.inlineCallFrame);
- setInt32(m_out.add(m_out.load32NonNegative(payloadFor(JSStack::ArgumentCount)), m_out.constInt32(-1)));
- }
-
- void compileGetMyArgumentByVal()
- {
- checkArgumentsNotCreated();
-
- CodeOrigin codeOrigin = m_node->origin.semantic;
-
- LValue zeroBasedIndex = lowInt32(m_node->child1());
- LValue oneBasedIndex = m_out.add(zeroBasedIndex, m_out.int32One);
-
- LValue limit;
- if (codeOrigin.inlineCallFrame)
- limit = m_out.constInt32(codeOrigin.inlineCallFrame->arguments.size());
- else
- limit = m_out.load32(payloadFor(JSStack::ArgumentCount));
-
- speculate(Uncountable, noValue(), 0, m_out.aboveOrEqual(oneBasedIndex, limit));
-
- SymbolTable* symbolTable = m_graph.baselineCodeBlockFor(codeOrigin)->symbolTable();
- if (symbolTable->slowArguments()) {
- // FIXME: FTL should support activations.
- // https://bugs.webkit.org/show_bug.cgi?id=129576
-
- LOWERING_FAILED(m_node, "Unimplemented");
- return;
- }
-
- TypedPointer base;
- if (codeOrigin.inlineCallFrame) {
- VirtualRegister reg;
- if (codeOrigin.inlineCallFrame->arguments.size() <= 1)
- reg = virtualRegisterForLocal(0); // Doesn't matter what we do since we would have exited anyway.
- else
- reg = codeOrigin.inlineCallFrame->arguments[1].virtualRegister();
- base = addressFor(reg);
- } else
- base = addressFor(virtualRegisterForArgument(1));
-
- LValue pointer = m_out.baseIndex(
- base.value(), m_out.zeroExt(zeroBasedIndex, m_out.intPtr), ScaleEight);
- setJSValue(m_out.load64(TypedPointer(m_heaps.variables.atAnyIndex(), pointer)));
- }
-
void compileGetArrayLength()
{
switch (m_node->arrayMode().type()) {
return;
}
+ case Array::DirectArguments: {
+ LValue arguments = lowCell(m_node->child1());
+ speculate(
+ ExoticObjectMode, noValue(), nullptr,
+ m_out.notNull(m_out.loadPtr(arguments, m_heaps.DirectArguments_overrides)));
+ setInt32(m_out.load32NonNegative(arguments, m_heaps.DirectArguments_length));
+ return;
+ }
+
+ case Array::ScopedArguments: {
+ LValue arguments = lowCell(m_node->child1());
+ speculate(
+ ExoticObjectMode, noValue(), nullptr,
+ m_out.notZero8(m_out.load8(arguments, m_heaps.ScopedArguments_overrodeThings)));
+ setInt32(m_out.load32NonNegative(arguments, m_heaps.ScopedArguments_totalLength));
+ return;
+ }
+
default:
if (isTypedView(m_node->arrayMode().typedArrayType())) {
setInt32(
return;
}
- LOWERING_FAILED(m_node, "Bad array type");
+ DFG_CRASH(m_graph, m_node, "Bad array type");
return;
}
}
if (m_node->arrayMode().isInBounds()) {
LValue result = m_out.load64(baseIndex(heap, storage, index, m_node->child2()));
- speculate(LoadFromHole, noValue(), 0, m_out.isZero64(result));
+ LValue isHole = m_out.isZero64(result);
+ if (m_node->arrayMode().isSaneChain()) {
+ DFG_ASSERT(
+ m_graph, m_node, m_node->arrayMode().type() == Array::Contiguous);
+ result = m_out.select(
+ isHole, m_out.constInt64(JSValue::encode(jsUndefined())), result);
+ } else
+ speculate(LoadFromHole, noValue(), 0, isHole);
setJSValue(result);
return;
}
return;
}
+ case Array::DirectArguments: {
+ LValue base = lowCell(m_node->child1());
+ LValue index = lowInt32(m_node->child2());
+
+ speculate(
+ ExoticObjectMode, noValue(), nullptr,
+ m_out.notNull(m_out.loadPtr(base, m_heaps.DirectArguments_overrides)));
+ speculate(
+ ExoticObjectMode, noValue(), nullptr,
+ m_out.aboveOrEqual(
+ index,
+ m_out.load32NonNegative(base, m_heaps.DirectArguments_length)));
+
+ TypedPointer address = m_out.baseIndex(
+ m_heaps.DirectArguments_storage, base, m_out.zeroExtPtr(index));
+ setJSValue(m_out.load64(address));
+ return;
+ }
+
+ case Array::ScopedArguments: {
+ LValue base = lowCell(m_node->child1());
+ LValue index = lowInt32(m_node->child2());
+
+ speculate(
+ ExoticObjectMode, noValue(), nullptr,
+ m_out.aboveOrEqual(
+ index,
+ m_out.load32NonNegative(base, m_heaps.ScopedArguments_totalLength)));
+
+ LValue table = m_out.loadPtr(base, m_heaps.ScopedArguments_table);
+ LValue namedLength = m_out.load32(table, m_heaps.ScopedArgumentsTable_length);
+
+ LBasicBlock namedCase = FTL_NEW_BLOCK(m_out, ("GetByVal ScopedArguments named case"));
+ LBasicBlock overflowCase = FTL_NEW_BLOCK(m_out, ("GetByVal ScopedArguments overflow case"));
+ LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("GetByVal ScopedArguments continuation"));
+
+ m_out.branch(
+ m_out.aboveOrEqual(index, namedLength), unsure(overflowCase), unsure(namedCase));
+
+ LBasicBlock lastNext = m_out.appendTo(namedCase, overflowCase);
+
+ LValue scope = m_out.loadPtr(base, m_heaps.ScopedArguments_scope);
+ LValue arguments = m_out.loadPtr(table, m_heaps.ScopedArgumentsTable_arguments);
+
+ TypedPointer address = m_out.baseIndex(
+ m_heaps.scopedArgumentsTableArguments, arguments, m_out.zeroExtPtr(index));
+ LValue scopeOffset = m_out.load32(address);
+
+ speculate(
+ ExoticObjectMode, noValue(), nullptr,
+ m_out.equal(scopeOffset, m_out.constInt32(ScopeOffset::invalidOffset)));
+
+ address = m_out.baseIndex(
+ m_heaps.JSEnvironmentRecord_variables, scope, m_out.zeroExtPtr(scopeOffset));
+ ValueFromBlock namedResult = m_out.anchor(m_out.load64(address));
+ m_out.jump(continuation);
+
+ m_out.appendTo(overflowCase, continuation);
+
+ address = m_out.baseIndex(
+ m_heaps.ScopedArguments_overflowStorage, base,
+ m_out.zeroExtPtr(m_out.sub(index, namedLength)));
+ LValue overflowValue = m_out.load64(address);
+ speculate(ExoticObjectMode, noValue(), nullptr, m_out.isZero64(overflowValue));
+ ValueFromBlock overflowResult = m_out.anchor(overflowValue);
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(m_out.int64, namedResult, overflowResult));
+ return;
+ }
+
case Array::Generic: {
setJSValue(vmCall(
m_out.operation(operationGetByVal), m_callFrame,
m_out.add(
storage,
m_out.shl(
- m_out.zeroExt(index, m_out.intPtr),
+ m_out.zeroExtPtr(index),
m_out.constIntPtr(logElementSize(type)))));
if (isInt(type)) {
result = m_out.load32(pointer);
break;
default:
- LOWERING_FAILED(m_node, "Bad element size");
- return;
+ DFG_CRASH(m_graph, m_node, "Bad element size");
}
if (elementSize(type) < 4) {
result = m_out.loadDouble(pointer);
break;
default:
- LOWERING_FAILED(m_node, "Bad typed array type");
- return;
+ DFG_CRASH(m_graph, m_node, "Bad typed array type");
}
setDouble(result);
return;
}
- LOWERING_FAILED(m_node, "Bad array type");
+ DFG_CRASH(m_graph, m_node, "Bad array type");
return;
} }
}
+ // GetMyArgumentByVal: read argument #index of the (possibly inlined) frame
+ // straight off the stack, without materializing an arguments object.
+ void compileGetMyArgumentByVal()
+ {
+ InlineCallFrame* inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame;
+
+ LValue index = lowInt32(m_node->child2());
+
+ // limit = the argument count excluding |this|. For a non-varargs inline
+ // frame it is a compile-time constant; otherwise load ArgumentCount from
+ // the appropriate frame slot and subtract one.
+ LValue limit;
+ if (inlineCallFrame && !inlineCallFrame->isVarargs())
+ limit = m_out.constInt32(inlineCallFrame->arguments.size() - 1);
+ else {
+ VirtualRegister argumentCountRegister;
+ if (!inlineCallFrame)
+ argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
+ else
+ argumentCountRegister = inlineCallFrame->argumentCountRegister;
+ limit = m_out.sub(m_out.load32(payloadFor(argumentCountRegister)), m_out.int32One);
+ }
+
+ // Bounds check: exit (ExoticObjectMode) when index >= limit.
+ speculate(ExoticObjectMode, noValue(), 0, m_out.aboveOrEqual(index, limit));
+
+ TypedPointer base;
+ if (inlineCallFrame) {
+ if (inlineCallFrame->arguments.size() <= 1) {
+ // We should have already exited due to the bounds check, above. Just tell the
+ // compiler that anything dominated by this instruction is not reachable, so
+ // that we don't waste time generating such code. This will also plant some
+ // kind of crashing instruction so that if by some fluke the bounds check didn't
+ // work, we'll crash in an easy-to-see way.
+ didAlreadyTerminate();
+ return;
+ }
+ base = addressFor(inlineCallFrame->arguments[1].virtualRegister());
+ } else
+ base = addressFor(virtualRegisterForArgument(1));
+
+ // Load base[index * 8]: arguments are contiguous 8-byte stack slots
+ // starting at argument 1 (argument 0 is |this|).
+ LValue pointer = m_out.baseIndex(
+ base.value(), m_out.zeroExt(index, m_out.intPtr), ScaleEight);
+ setJSValue(m_out.load64(TypedPointer(m_heaps.variables.atAnyIndex(), pointer)));
+ }
+
void compilePutByVal()
{
Edge child1 = m_graph.varArgChild(m_node, 0);
TypedPointer elementPointer = m_out.baseIndex(
m_node->arrayMode().type() == Array::Int32 ?
m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties,
- storage, m_out.zeroExt(index, m_out.intPtr),
- m_state.forNode(child2).m_value);
+ storage, m_out.zeroExtPtr(index), provenValue(child2));
if (m_node->op() == PutByValAlias) {
m_out.store64(value, elementPointer);
m_out.doubleNotEqualOrUnordered(value, value));
TypedPointer elementPointer = m_out.baseIndex(
- m_heaps.indexedDoubleProperties,
- storage, m_out.zeroExt(index, m_out.intPtr),
- m_state.forNode(child2).m_value);
+ m_heaps.indexedDoubleProperties, storage, m_out.zeroExtPtr(index),
+ provenValue(child2));
if (m_node->op() == PutByValAlias) {
m_out.storeDouble(value, elementPointer);
}
default:
- LOWERING_FAILED(m_node, "Bad array type");
- return;
+ DFG_CRASH(m_graph, m_node, "Bad array type");
}
m_out.jump(continuation);
}
default:
- LOWERING_FAILED(m_node, "Bad use kind");
- return;
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
}
switch (elementSize(type)) {
refType = m_out.ref32;
break;
default:
- LOWERING_FAILED(m_node, "Bad element size");
- return;
+ DFG_CRASH(m_graph, m_node, "Bad element size");
}
} else /* !isInt(type) */ {
LValue value = lowDouble(child3);
refType = m_out.refDouble;
break;
default:
- LOWERING_FAILED(m_node, "Bad typed array type");
- return;
+ DFG_CRASH(m_graph, m_node, "Bad typed array type");
}
}
return;
}
- LOWERING_FAILED(m_node, "Bad array type");
- return;
+ DFG_CRASH(m_graph, m_node, "Bad array type");
+ break;
}
}
LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath);
m_out.store(
- value,
- m_out.baseIndex(heap, storage, m_out.zeroExt(prevLength, m_out.intPtr)),
- refType);
+ value, m_out.baseIndex(heap, storage, m_out.zeroExtPtr(prevLength)), refType);
LValue newLength = m_out.add(prevLength, m_out.int32One);
m_out.store32(newLength, storage, m_heaps.Butterfly_publicLength);
}
default:
- LOWERING_FAILED(m_node, "Bad array type");
+ DFG_CRASH(m_graph, m_node, "Bad array type");
return;
}
}
LBasicBlock lastNext = m_out.appendTo(fastCase, slowCase);
LValue newLength = m_out.sub(prevLength, m_out.int32One);
m_out.store32(newLength, storage, m_heaps.Butterfly_publicLength);
- TypedPointer pointer = m_out.baseIndex(
- heap, storage, m_out.zeroExt(newLength, m_out.intPtr));
+ TypedPointer pointer = m_out.baseIndex(heap, storage, m_out.zeroExtPtr(newLength));
if (m_node->arrayMode().type() != Array::Double) {
LValue result = m_out.load64(pointer);
m_out.store64(m_out.int64Zero, pointer);
}
default:
- LOWERING_FAILED(m_node, "Bad array type");
+ DFG_CRASH(m_graph, m_node, "Bad array type");
return;
}
}
- void compileNewObject()
+ // CreateActivation: allocate a JSLexicalEnvironment for the given scope and
+ // symbol table. While the table's singleton-scope watchpoint is still valid
+ // we must go to the runtime (operationCreateActivationDirect) so the
+ // singleton bookkeeping happens; otherwise inline-allocate, wire up the
+ // scope chain and symbol table, and initialize every variable slot to
+ // undefined, with the same runtime call as the allocation slow path.
+ void compileCreateActivation()
{
- Structure* structure = m_node->structure();
- size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
- MarkedAllocator* allocator = &vm().heap.allocatorForObjectWithoutDestructor(allocationSize);
+ LValue scope = lowCell(m_node->child1());
+ SymbolTable* table = m_node->castOperand<SymbolTable*>();
+ Structure* structure = m_graph.globalObjectFor(m_node->origin.semantic)->activationStructure();
+
+ if (table->singletonScope()->isStillValid()) {
+ LValue callResult = vmCall(
+ m_out.operation(operationCreateActivationDirect), m_callFrame, weakPointer(structure),
+ scope, weakPointer(table));
+ setJSValue(callResult);
+ return;
+ }
- LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("NewObject slow path"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("NewObject continuation"));
+ LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("CreateActivation slow path"));
+ LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("CreateActivation continuation"));
LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
- ValueFromBlock fastResult = m_out.anchor(allocateObject(
- m_out.constIntPtr(allocator), structure, m_out.intPtrZero, slowPath));
+ LValue fastObject = allocateObject<JSLexicalEnvironment>(
+ JSLexicalEnvironment::allocationSize(table), structure, m_out.intPtrZero, slowPath);
+
+ // We don't need memory barriers since we just fast-created the activation, so the
+ // activation must be young.
+ m_out.storePtr(scope, fastObject, m_heaps.JSScope_next);
+ m_out.storePtr(weakPointer(table), fastObject, m_heaps.JSSymbolTableObject_symbolTable);
+ for (unsigned i = 0; i < table->scopeSize(); ++i) {
+ m_out.store64(
+ m_out.constInt64(JSValue::encode(jsUndefined())),
+ fastObject, m_heaps.JSEnvironmentRecord_variables[i]);
+ }
+
+ ValueFromBlock fastResult = m_out.anchor(fastObject);
m_out.jump(continuation);
m_out.appendTo(slowPath, continuation);
-
- ValueFromBlock slowResult = m_out.anchor(vmCall(
- m_out.operation(operationNewObject), m_callFrame, m_out.constIntPtr(structure)));
+ LValue callResult = vmCall(
+ m_out.operation(operationCreateActivationDirect), m_callFrame, weakPointer(structure),
+ scope, weakPointer(table));
+ ValueFromBlock slowResult = m_out.anchor(callResult);
m_out.jump(continuation);
m_out.appendTo(continuation, lastNext);
setJSValue(m_out.phi(m_out.intPtr, fastResult, slowResult));
}
- void compileNewArray()
+ // NewFunction: allocate a JSFunction closing over the given scope. While
+ // the executable's singleton-function watchpoint is still valid we must
+ // call operationNewFunction so the singleton bookkeeping happens.
+ // Otherwise inline-allocate the JSFunction, store scope/executable and a
+ // null rareData, and fall back to the
+ // invalidated-reallocation-watchpoint runtime call on allocation failure.
+ void compileNewFunction()
{
- // First speculate appropriately on all of the children. Do this unconditionally up here
- // because some of the slow paths may otherwise forget to do it. It's sort of arguable
- // that doing the speculations up here might be unprofitable for RA - so we can consider
- // sinking this to below the allocation fast path if we find that this has a lot of
- // register pressure.
- for (unsigned operandIndex = 0; operandIndex < m_node->numChildren(); ++operandIndex)
- speculate(m_graph.varArgChild(m_node, operandIndex));
+ LValue scope = lowCell(m_node->child1());
+ FunctionExecutable* executable = m_node->castOperand<FunctionExecutable*>();
+ if (executable->singletonFunction()->isStillValid()) {
+ LValue callResult = vmCall(
+ m_out.operation(operationNewFunction), m_callFrame, scope, weakPointer(executable));
+ setJSValue(callResult);
+ return;
+ }
- JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
- Structure* structure = globalObject->arrayStructureForIndexingTypeDuringAllocation(
- m_node->indexingType());
+ Structure* structure = m_graph.globalObjectFor(m_node->origin.semantic)->functionStructure();
- RELEASE_ASSERT(structure->indexingType() == m_node->indexingType());
+ LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("NewFunction slow path"));
+ LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("NewFunction continuation"));
- if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(m_node->indexingType())) {
- unsigned numElements = m_node->numChildren();
-
- ArrayValues arrayValues = allocateJSArray(structure, numElements);
-
- for (unsigned operandIndex = 0; operandIndex < m_node->numChildren(); ++operandIndex) {
- Edge edge = m_graph.varArgChild(m_node, operandIndex);
-
- switch (m_node->indexingType()) {
- case ALL_BLANK_INDEXING_TYPES:
- case ALL_UNDECIDED_INDEXING_TYPES:
- CRASH();
- break;
-
- case ALL_DOUBLE_INDEXING_TYPES:
- m_out.storeDouble(
- lowDouble(edge),
- arrayValues.butterfly, m_heaps.indexedDoubleProperties[operandIndex]);
- break;
-
- case ALL_INT32_INDEXING_TYPES:
- case ALL_CONTIGUOUS_INDEXING_TYPES:
- m_out.store64(
- lowJSValue(edge, ManualOperandSpeculation),
- arrayValues.butterfly,
- m_heaps.forIndexingType(m_node->indexingType())->at(operandIndex));
- break;
-
- default:
- CRASH();
- }
- }
-
- setJSValue(arrayValues.array);
- return;
- }
+ LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
- if (!m_node->numChildren()) {
- setJSValue(vmCall(
- m_out.operation(operationNewEmptyArray), m_callFrame,
- m_out.constIntPtr(structure)));
- return;
- }
+ LValue fastObject = allocateObject<JSFunction>(
+ structure, m_out.intPtrZero, slowPath);
- size_t scratchSize = sizeof(EncodedJSValue) * m_node->numChildren();
- ASSERT(scratchSize);
- ScratchBuffer* scratchBuffer = vm().scratchBufferForSize(scratchSize);
- EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());
+ // We don't need memory barriers since we just fast-created the function, so it
+ // must be young.
+ m_out.storePtr(scope, fastObject, m_heaps.JSFunction_scope);
+ m_out.storePtr(weakPointer(executable), fastObject, m_heaps.JSFunction_executable);
+ m_out.storePtr(m_out.intPtrZero, fastObject, m_heaps.JSFunction_rareData);
+
+ ValueFromBlock fastResult = m_out.anchor(fastObject);
+ m_out.jump(continuation);
+
+ m_out.appendTo(slowPath, continuation);
+ LValue callResult = vmCall(
+ m_out.operation(operationNewFunctionWithInvalidatedReallocationWatchpoint),
+ m_callFrame, scope, weakPointer(executable));
+ ValueFromBlock slowResult = m_out.anchor(callResult);
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(m_out.intPtr, fastResult, slowResult));
+ }
+
+ void compileCreateDirectArguments()
+ {
+ // FIXME: A more effective way of dealing with the argument count and callee is to have
+ // them be explicit arguments to this node.
+ // https://bugs.webkit.org/show_bug.cgi?id=142207
+
+ Structure* structure =
+ m_graph.globalObjectFor(m_node->origin.semantic)->directArgumentsStructure();
+
+ unsigned minCapacity = m_graph.baselineCodeBlockFor(m_node->origin.semantic)->numParameters() - 1;
+
+ LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("CreateDirectArguments slow path"));
+ LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("CreateDirectArguments continuation"));
+
+ LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
+
+ ArgumentsLength length = getArgumentsLength();
+
+ LValue fastObject;
+ if (length.isKnown) {
+ fastObject = allocateObject<DirectArguments>(
+ DirectArguments::allocationSize(std::max(length.known, minCapacity)), structure,
+ m_out.intPtrZero, slowPath);
+ } else {
+ LValue size = m_out.add(
+ m_out.shl(length.value, m_out.constInt32(3)),
+ m_out.constInt32(DirectArguments::storageOffset()));
+
+ size = m_out.select(
+ m_out.aboveOrEqual(length.value, m_out.constInt32(minCapacity)),
+ size, m_out.constInt32(DirectArguments::allocationSize(minCapacity)));
+
+ fastObject = allocateVariableSizedObject<DirectArguments>(
+ size, structure, m_out.intPtrZero, slowPath);
+ }
+
+ m_out.store32(length.value, fastObject, m_heaps.DirectArguments_length);
+ m_out.store32(m_out.constInt32(minCapacity), fastObject, m_heaps.DirectArguments_minCapacity);
+ m_out.storePtr(m_out.intPtrZero, fastObject, m_heaps.DirectArguments_overrides);
+
+ ValueFromBlock fastResult = m_out.anchor(fastObject);
+ m_out.jump(continuation);
+
+ m_out.appendTo(slowPath, continuation);
+ LValue callResult = vmCall(
+ m_out.operation(operationCreateDirectArguments), m_callFrame, weakPointer(structure),
+ length.value, m_out.constInt32(minCapacity));
+ ValueFromBlock slowResult = m_out.anchor(callResult);
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ LValue result = m_out.phi(m_out.intPtr, fastResult, slowResult);
+
+ m_out.storePtr(getCurrentCallee(), result, m_heaps.DirectArguments_callee);
+
+ if (length.isKnown) {
+ VirtualRegister start = AssemblyHelpers::argumentsStart(m_node->origin.semantic);
+ for (unsigned i = 0; i < std::max(length.known, minCapacity); ++i) {
+ m_out.store64(
+ m_out.load64(addressFor(start + i)),
+ result, m_heaps.DirectArguments_storage[i]);
+ }
+ } else {
+ LValue stackBase = getArgumentsStart();
+
+ LBasicBlock loop = FTL_NEW_BLOCK(m_out, ("CreateDirectArguments loop body"));
+ LBasicBlock end = FTL_NEW_BLOCK(m_out, ("CreateDirectArguments loop end"));
+
+ ValueFromBlock originalLength;
+ if (minCapacity) {
+ LValue capacity = m_out.select(
+ m_out.aboveOrEqual(length.value, m_out.constInt32(minCapacity)),
+ length.value,
+ m_out.constInt32(minCapacity));
+ originalLength = m_out.anchor(m_out.zeroExtPtr(capacity));
+ m_out.jump(loop);
+ } else {
+ originalLength = m_out.anchor(m_out.zeroExtPtr(length.value));
+ m_out.branch(m_out.isNull(originalLength.value()), unsure(end), unsure(loop));
+ }
+
+ lastNext = m_out.appendTo(loop, end);
+ LValue previousIndex = m_out.phi(m_out.intPtr, originalLength);
+ LValue index = m_out.sub(previousIndex, m_out.intPtrOne);
+ m_out.store64(
+ m_out.load64(m_out.baseIndex(m_heaps.variables, stackBase, index)),
+ m_out.baseIndex(m_heaps.DirectArguments_storage, result, index));
+ ValueFromBlock nextIndex = m_out.anchor(index);
+ addIncoming(previousIndex, nextIndex);
+ m_out.branch(m_out.isNull(index), unsure(end), unsure(loop));
+
+ m_out.appendTo(end, lastNext);
+ }
+
+ setJSValue(result);
+ }
+
+ void compileCreateScopedArguments()
+ {
+ LValue scope = lowCell(m_node->child1());
+
+ LValue result = vmCall(
+ m_out.operation(operationCreateScopedArguments), m_callFrame,
+ weakPointer(
+ m_graph.globalObjectFor(m_node->origin.semantic)->scopedArgumentsStructure()),
+ getArgumentsStart(), getArgumentsLength().value, getCurrentCallee(), scope);
+
+ setJSValue(result);
+ }
+
+ void compileCreateClonedArguments()
+ {
+ LValue result = vmCall(
+ m_out.operation(operationCreateClonedArguments), m_callFrame,
+ weakPointer(
+ m_graph.globalObjectFor(m_node->origin.semantic)->outOfBandArgumentsStructure()),
+ getArgumentsStart(), getArgumentsLength().value, getCurrentCallee());
+
+ setJSValue(result);
+ }
+
+ void compileNewObject()
+ {
+ setJSValue(allocateObject(m_node->structure()));
+ }
+
+ void compileNewArray()
+ {
+ // First speculate appropriately on all of the children. Do this unconditionally up here
+ // because some of the slow paths may otherwise forget to do it. It's sort of arguable
+ // that doing the speculations up here might be unprofitable for RA - so we can consider
+ // sinking this to below the allocation fast path if we find that this has a lot of
+ // register pressure.
+ for (unsigned operandIndex = 0; operandIndex < m_node->numChildren(); ++operandIndex)
+ speculate(m_graph.varArgChild(m_node, operandIndex));
+
+ JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
+ Structure* structure = globalObject->arrayStructureForIndexingTypeDuringAllocation(
+ m_node->indexingType());
+
+ DFG_ASSERT(m_graph, m_node, structure->indexingType() == m_node->indexingType());
+
+ if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(m_node->indexingType())) {
+ unsigned numElements = m_node->numChildren();
+
+ ArrayValues arrayValues = allocateJSArray(structure, numElements);
+
+ for (unsigned operandIndex = 0; operandIndex < m_node->numChildren(); ++operandIndex) {
+ Edge edge = m_graph.varArgChild(m_node, operandIndex);
+
+ switch (m_node->indexingType()) {
+ case ALL_BLANK_INDEXING_TYPES:
+ case ALL_UNDECIDED_INDEXING_TYPES:
+ DFG_CRASH(m_graph, m_node, "Bad indexing type");
+ break;
+
+ case ALL_DOUBLE_INDEXING_TYPES:
+ m_out.storeDouble(
+ lowDouble(edge),
+ arrayValues.butterfly, m_heaps.indexedDoubleProperties[operandIndex]);
+ break;
+
+ case ALL_INT32_INDEXING_TYPES:
+ case ALL_CONTIGUOUS_INDEXING_TYPES:
+ m_out.store64(
+ lowJSValue(edge, ManualOperandSpeculation),
+ arrayValues.butterfly,
+ m_heaps.forIndexingType(m_node->indexingType())->at(operandIndex));
+ break;
+
+ default:
+ DFG_CRASH(m_graph, m_node, "Corrupt indexing type");
+ break;
+ }
+ }
+
+ setJSValue(arrayValues.array);
+ return;
+ }
+
+ if (!m_node->numChildren()) {
+ setJSValue(vmCall(
+ m_out.operation(operationNewEmptyArray), m_callFrame,
+ m_out.constIntPtr(structure)));
+ return;
+ }
+
+ size_t scratchSize = sizeof(EncodedJSValue) * m_node->numChildren();
+ ASSERT(scratchSize);
+ ScratchBuffer* scratchBuffer = vm().scratchBufferForSize(scratchSize);
+ EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());
for (unsigned operandIndex = 0; operandIndex < m_node->numChildren(); ++operandIndex) {
Edge edge = m_graph.varArgChild(m_node, operandIndex);
Structure* structure = globalObject->arrayStructureForIndexingTypeDuringAllocation(
m_node->indexingType());
- RELEASE_ASSERT(structure->indexingType() == m_node->indexingType());
+ DFG_ASSERT(m_graph, m_node, structure->indexingType() == m_node->indexingType());
if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(m_node->indexingType())) {
unsigned numElements = m_node->numConstants();
LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("NewArrayWithSize continuation"));
m_out.branch(
- m_out.aboveOrEqual(publicLength, m_out.constInt32(MIN_SPARSE_ARRAY_INDEX)),
+ m_out.aboveOrEqual(publicLength, m_out.constInt32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH)),
rarely(largeCase), usually(fastCase));
LBasicBlock lastNext = m_out.appendTo(fastCase, largeCase);
}
LValue structureValue = m_out.select(
- m_out.aboveOrEqual(publicLength, m_out.constInt32(MIN_SPARSE_ARRAY_INDEX)),
+ m_out.aboveOrEqual(publicLength, m_out.constInt32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH)),
m_out.constIntPtr(
globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage)),
m_out.constIntPtr(structure));
void compileAllocatePropertyStorage()
{
- StructureTransitionData& data = m_node->structureTransitionData();
LValue object = lowCell(m_node->child1());
-
- setStorage(allocatePropertyStorage(object, data.previousStructure));
+ setStorage(allocatePropertyStorage(object, m_node->transition()->previous));
}
void compileReallocatePropertyStorage()
{
- StructureTransitionData& data = m_node->structureTransitionData();
+ Transition* transition = m_node->transition();
LValue object = lowCell(m_node->child1());
LValue oldStorage = lowStorage(m_node->child2());
setStorage(
reallocatePropertyStorage(
- object, oldStorage, data.previousStructure, data.newStructure));
+ object, oldStorage, transition->previous, transition->next));
}
- void compileToString()
+ void compileToStringOrCallStringConstructor()
{
switch (m_node->child1().useKind()) {
case StringObjectUse: {
if (m_node->child1().useKind() == CellUse)
isCellPredicate = m_out.booleanTrue;
else
- isCellPredicate = this->isCell(value);
+ isCellPredicate = this->isCell(value, provenType(m_node->child1()));
m_out.branch(isCellPredicate, unsure(isCell), unsure(notString));
LBasicBlock lastNext = m_out.appendTo(isCell, notString);
ValueFromBlock simpleResult = m_out.anchor(value);
LValue isStringPredicate;
if (m_node->child1()->prediction() & SpecString) {
- isStringPredicate = m_out.equal(
- m_out.load32(value, m_heaps.JSCell_structureID),
- m_out.constInt32(vm().stringStructure->id()));
+ isStringPredicate = isString(value, provenType(m_node->child1()));
} else
isStringPredicate = m_out.booleanFalse;
m_out.branch(isStringPredicate, unsure(continuation), unsure(notString));
m_out.appendTo(notString, continuation);
LValue operation;
if (m_node->child1().useKind() == CellUse)
- operation = m_out.operation(operationToStringOnCell);
+ operation = m_out.operation(m_node->op() == ToString ? operationToStringOnCell : operationCallStringConstructorOnCell);
else
- operation = m_out.operation(operationToString);
+ operation = m_out.operation(m_node->op() == ToString ? operationToString : operationCallStringConstructor);
ValueFromBlock convertedResult = m_out.anchor(vmCall(operation, m_callFrame, value));
m_out.jump(continuation);
}
default:
- LOWERING_FAILED(m_node, "Bad use kind");
- return;
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
+ break;
}
}
Vector<ValueFromBlock, 3> results;
results.append(m_out.anchor(value));
- m_out.branch(isCell(value), unsure(isCellCase), unsure(continuation));
+ m_out.branch(
+ isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(continuation));
LBasicBlock lastNext = m_out.appendTo(isCellCase, isObjectCase);
results.append(m_out.anchor(value));
- m_out.branch(isObject(value), unsure(isObjectCase), unsure(continuation));
+ m_out.branch(
+ isObject(value, provenType(m_node->child1())),
+ unsure(isObjectCase), unsure(continuation));
m_out.appendTo(isObjectCase, continuation);
results.append(m_out.anchor(vmCall(
LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
MarkedAllocator& allocator =
- vm().heap.allocatorForObjectWithImmortalStructureDestructor(sizeof(JSRopeString));
+ vm().heap.allocatorForObjectWithDestructor(sizeof(JSRopeString));
LValue result = allocateCell(
m_out.constIntPtr(&allocator),
m_out.operation(operationMakeRope3), m_callFrame, kids[0], kids[1], kids[2]));
break;
default:
- LOWERING_FAILED(m_node, "Bad number of children");
- return;
+ DFG_CRASH(m_graph, m_node, "Bad number of children");
break;
}
m_out.jump(continuation);
ValueFromBlock char8Bit = m_out.anchor(m_out.zeroExt(
m_out.load8(m_out.baseIndex(
- m_heaps.characters8,
- storage, m_out.zeroExt(index, m_out.intPtr),
- m_state.forNode(m_node->child2()).m_value)),
+ m_heaps.characters8, storage, m_out.zeroExtPtr(index),
+ provenValue(m_node->child2()))),
m_out.int32));
m_out.jump(bitsContinuation);
ValueFromBlock char16Bit = m_out.anchor(m_out.zeroExt(
m_out.load16(m_out.baseIndex(
- m_heaps.characters16,
- storage, m_out.zeroExt(index, m_out.intPtr),
- m_state.forNode(m_node->child2()).m_value)),
+ m_heaps.characters16, storage, m_out.zeroExtPtr(index),
+ provenValue(m_node->child2()))),
m_out.int32));
m_out.branch(
m_out.aboveOrEqual(char16Bit.value(), m_out.constInt32(0x100)),
LValue smallStrings = m_out.constIntPtr(vm().smallStrings.singleCharacterStrings());
results.append(m_out.anchor(m_out.loadPtr(m_out.baseIndex(
- m_heaps.singleCharacterStrings, smallStrings,
- m_out.zeroExt(character, m_out.intPtr)))));
+ m_heaps.singleCharacterStrings, smallStrings, m_out.zeroExtPtr(character)))));
m_out.jump(continuation);
m_out.appendTo(slowPath, continuation);
JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
if (globalObject->stringPrototypeChainIsSane()) {
+ // FIXME: This could be captured using a Speculation mode that means
+ // "out-of-bounds loads return a trivial value", something like
+ // SaneChainOutOfBounds.
+ // https://bugs.webkit.org/show_bug.cgi?id=144668
+
+ m_graph.watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
+ m_graph.watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
+
LBasicBlock negativeIndex = FTL_NEW_BLOCK(m_out, ("GetByVal String negative index"));
results.append(m_out.anchor(m_out.constInt64(JSValue::encode(jsUndefined()))));
ValueFromBlock char8Bit = m_out.anchor(m_out.zeroExt(
m_out.load8(m_out.baseIndex(
- m_heaps.characters8,
- storage, m_out.zeroExt(index, m_out.intPtr),
- m_state.forNode(m_node->child2()).m_value)),
+ m_heaps.characters8, storage, m_out.zeroExtPtr(index),
+ provenValue(m_node->child2()))),
m_out.int32));
m_out.jump(continuation);
ValueFromBlock char16Bit = m_out.anchor(m_out.zeroExt(
m_out.load16(m_out.baseIndex(
- m_heaps.characters16,
- storage, m_out.zeroExt(index, m_out.intPtr),
- m_state.forNode(m_node->child2()).m_value)),
+ m_heaps.characters16, storage, m_out.zeroExtPtr(index),
+ provenValue(m_node->child2()))),
m_out.int32));
m_out.jump(continuation);
void compileGetByOffset()
{
- StorageAccessData& data =
- m_graph.m_storageAccessData[m_node->storageAccessDataIndex()];
+ StorageAccessData& data = m_node->storageAccessData();
setJSValue(loadProperty(
lowStorage(m_node->child1()), data.identifierNumber, data.offset));
}
+ void compileGetGetter()
+ {
+ setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.GetterSetter_getter));
+ }
+
+ void compileGetSetter()
+ {
+ setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.GetterSetter_setter));
+ }
+
void compileMultiGetByOffset()
{
LValue base = lowCell(m_node->child1());
MultiGetByOffsetData& data = m_node->multiGetByOffsetData();
+
+ if (data.variants.isEmpty()) {
+ // Protect against creating a Phi function with zero inputs. LLVM doesn't like that.
+ terminate(BadCache);
+ return;
+ }
Vector<LBasicBlock, 2> blocks(data.variants.size());
for (unsigned i = data.variants.size(); i--;)
LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("MultiGetByOffset continuation"));
Vector<SwitchCase, 2> cases;
+ StructureSet baseSet;
for (unsigned i = data.variants.size(); i--;) {
GetByIdVariant variant = data.variants[i];
for (unsigned j = variant.structureSet().size(); j--;) {
- cases.append(SwitchCase(
- weakStructure(variant.structureSet()[j]), blocks[i], Weight(1)));
+ Structure* structure = variant.structureSet()[j];
+ baseSet.add(structure);
+ cases.append(SwitchCase(weakStructureID(structure), blocks[i], Weight(1)));
}
}
m_out.switchInstruction(
m_out.appendTo(blocks[i], i + 1 < data.variants.size() ? blocks[i + 1] : exit);
GetByIdVariant variant = data.variants[i];
+ baseSet.merge(variant.structureSet());
LValue result;
- if (variant.specificValue())
- result = m_out.constInt64(JSValue::encode(variant.specificValue()));
+ JSValue constantResult;
+ if (variant.alternateBase()) {
+ constantResult = m_graph.tryGetConstantProperty(
+ variant.alternateBase(), variant.baseStructure(), variant.offset());
+ }
+ if (constantResult)
+ result = m_out.constInt64(JSValue::encode(constantResult));
else {
LValue propertyBase;
- if (variant.chain())
- propertyBase = weakPointer(variant.chain()->terminalPrototype());
+ if (variant.alternateBase())
+ propertyBase = weakPointer(variant.alternateBase());
else
propertyBase = base;
if (!isInlineOffset(variant.offset()))
}
m_out.appendTo(exit, continuation);
- terminate(BadCache);
+ if (!m_interpreter.forNode(m_node->child1()).m_structure.isSubsetOf(baseSet))
+ speculate(BadCache, noValue(), nullptr, m_out.booleanTrue);
m_out.unreachable();
m_out.appendTo(continuation, lastNext);
void compilePutByOffset()
{
- StorageAccessData& data =
- m_graph.m_storageAccessData[m_node->storageAccessDataIndex()];
+ StorageAccessData& data = m_node->storageAccessData();
storeProperty(
lowJSValue(m_node->child3()),
LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("MultiPutByOffset continuation"));
Vector<SwitchCase, 2> cases;
+ StructureSet baseSet;
for (unsigned i = data.variants.size(); i--;) {
PutByIdVariant variant = data.variants[i];
- cases.append(
- SwitchCase(weakStructure(variant.oldStructure()), blocks[i], Weight(1)));
+ for (unsigned j = variant.oldStructure().size(); j--;) {
+ Structure* structure = variant.oldStructure()[j];
+ baseSet.add(structure);
+ cases.append(SwitchCase(weakStructureID(structure), blocks[i], Weight(1)));
+ }
}
m_out.switchInstruction(
m_out.load32(base, m_heaps.JSCell_structureID), cases, exit, Weight(0));
LBasicBlock lastNext = m_out.m_nextBlock;
for (unsigned i = data.variants.size(); i--;) {
- m_out.appendTo(blocks[i], i + 1 < data.variants.size() ? blocks[i + 1] : exit);
+ m_out.appendTo(blocks[i], i + 1 < data.variants.size() ? blocks[i + 1] : exit);
PutByIdVariant variant = data.variants[i];
} else {
m_graph.m_plan.transitions.addLazily(
codeBlock(), m_node->origin.semantic.codeOriginOwner(),
- variant.oldStructure(), variant.newStructure());
+ variant.oldStructureForTransition(), variant.newStructure());
storage = storageForTransition(
- base, variant.offset(), variant.oldStructure(), variant.newStructure());
+ base, variant.offset(),
+ variant.oldStructureForTransition(), variant.newStructure());
- ASSERT(variant.oldStructure()->indexingType() == variant.newStructure()->indexingType());
- ASSERT(variant.oldStructure()->typeInfo().inlineTypeFlags() == variant.newStructure()->typeInfo().inlineTypeFlags());
- ASSERT(variant.oldStructure()->typeInfo().type() == variant.newStructure()->typeInfo().type());
+ ASSERT(variant.oldStructureForTransition()->indexingType() == variant.newStructure()->indexingType());
+ ASSERT(variant.oldStructureForTransition()->typeInfo().inlineTypeFlags() == variant.newStructure()->typeInfo().inlineTypeFlags());
+ ASSERT(variant.oldStructureForTransition()->typeInfo().type() == variant.newStructure()->typeInfo().type());
m_out.store32(
- weakStructure(variant.newStructure()), base, m_heaps.JSCell_structureID);
+ weakStructureID(variant.newStructure()), base, m_heaps.JSCell_structureID);
}
storeProperty(value, storage, data.identifierNumber, variant.offset());
}
m_out.appendTo(exit, continuation);
- terminate(BadCache);
+ if (!m_interpreter.forNode(m_node->child1()).m_structure.isSubsetOf(baseSet))
+ speculate(BadCache, noValue(), nullptr, m_out.booleanTrue);
m_out.unreachable();
m_out.appendTo(continuation, lastNext);
void compileGetGlobalVar()
{
- setJSValue(m_out.load64(m_out.absolute(m_node->registerPointer())));
+ setJSValue(m_out.load64(m_out.absolute(m_node->variablePointer())));
}
void compilePutGlobalVar()
{
m_out.store64(
- lowJSValue(m_node->child1()), m_out.absolute(m_node->registerPointer()));
+ lowJSValue(m_node->child2()), m_out.absolute(m_node->variablePointer()));
}
void compileNotifyWrite()
{
- VariableWatchpointSet* set = m_node->variableWatchpointSet();
-
- LValue value = lowJSValue(m_node->child1());
+ WatchpointSet* set = m_node->watchpointSet();
LBasicBlock isNotInvalidated = FTL_NEW_BLOCK(m_out, ("NotifyWrite not invalidated case"));
- LBasicBlock notifySlow = FTL_NEW_BLOCK(m_out, ("NotifyWrite notify slow case"));
LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("NotifyWrite continuation"));
LValue state = m_out.load8(m_out.absolute(set->addressOfState()));
-
m_out.branch(
m_out.equal(state, m_out.constInt8(IsInvalidated)),
usually(continuation), rarely(isNotInvalidated));
- LBasicBlock lastNext = m_out.appendTo(isNotInvalidated, notifySlow);
-
- m_out.branch(
- m_out.equal(value, m_out.load64(m_out.absolute(set->addressOfInferredValue()))),
- unsure(continuation), unsure(notifySlow));
-
- m_out.appendTo(notifySlow, continuation);
+ LBasicBlock lastNext = m_out.appendTo(isNotInvalidated, continuation);
- vmCall(m_out.operation(operationNotifyWrite), m_callFrame, m_out.constIntPtr(set), value);
+ vmCall(m_out.operation(operationNotifyWrite), m_callFrame, m_out.constIntPtr(set));
m_out.jump(continuation);
m_out.appendTo(continuation, lastNext);
setJSValue(m_out.loadPtr(addressFor(JSStack::Callee)));
}
- void compileGetScope()
+ void compileGetArgumentCount()
{
- setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.JSFunction_scope));
+ setInt32(m_out.load32(payloadFor(JSStack::ArgumentCount)));
}
- void compileGetMyScope()
+ void compileGetScope()
{
- setJSValue(m_out.loadPtr(addressFor(
- m_node->origin.semantic.stackOffset() + JSStack::ScopeChain)));
+ setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.JSFunction_scope));
}
void compileSkipScope()
setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.JSScope_next));
}
- void compileGetClosureRegisters()
+ void compileGetClosureVar()
{
- if (WriteBarrierBase<Unknown>* registers = m_graph.tryGetRegisters(m_node->child1().node())) {
- setStorage(m_out.constIntPtr(registers));
- return;
- }
-
- setStorage(m_out.loadPtr(
- lowCell(m_node->child1()), m_heaps.JSVariableObject_registers));
+ setJSValue(
+ m_out.load64(
+ lowCell(m_node->child1()),
+ m_heaps.JSEnvironmentRecord_variables[m_node->scopeOffset().offset()]));
}
- void compileGetClosureVar()
+ void compilePutClosureVar()
{
- setJSValue(m_out.load64(
- addressFor(lowStorage(m_node->child1()), m_node->varNumber())));
+ m_out.store64(
+ lowJSValue(m_node->child2()),
+ lowCell(m_node->child1()),
+ m_heaps.JSEnvironmentRecord_variables[m_node->scopeOffset().offset()]);
}
- void compilePutClosureVar()
+ void compileGetFromArguments()
+ {
+ setJSValue(
+ m_out.load64(
+ lowCell(m_node->child1()),
+ m_heaps.DirectArguments_storage[m_node->capturedArgumentsOffset().offset()]));
+ }
+
+ void compilePutToArguments()
{
m_out.store64(
- lowJSValue(m_node->child3()),
- addressFor(lowStorage(m_node->child2()), m_node->varNumber()));
+ lowJSValue(m_node->child2()),
+ lowCell(m_node->child1()),
+ m_heaps.DirectArguments_storage[m_node->capturedArgumentsOffset().offset()]);
}
void compileCompareEq()
nonSpeculativeCompare(LLVMIntEQ, operationCompareEq);
return;
}
-
- LOWERING_FAILED(m_node, "Bad use kinds");
+
+ DFG_CRASH(m_graph, m_node, "Bad use kinds");
}
void compileCompareEqConstant()
{
- ASSERT(m_graph.valueOfJSConstant(m_node->child2().node()).isNull());
+ ASSERT(m_node->child2()->asJSValue().isNull());
setBoolean(
equalNullOrUndefined(
m_node->child1(), AllCellsAreFalse, EqualNullOrUndefined));
m_out.equal(lowStringIdent(m_node->child1()), lowStringIdent(m_node->child2())));
return;
}
+
+ if (m_node->isBinaryUseKind(ObjectUse, UntypedUse)) {
+ setBoolean(
+ m_out.equal(
+ lowNonNullObject(m_node->child1()),
+ lowJSValue(m_node->child2())));
+ return;
+ }
+ if (m_node->isBinaryUseKind(UntypedUse, ObjectUse)) {
+ setBoolean(
+ m_out.equal(
+ lowNonNullObject(m_node->child2()),
+ lowJSValue(m_node->child1())));
+ return;
+ }
+
if (m_node->isBinaryUseKind(ObjectUse)) {
setBoolean(
m_out.equal(
LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("CompareStrictEq StringIdent to NotStringVar continuation"));
ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
- m_out.branch(isCell(rightValue), unsure(isCellCase), unsure(continuation));
+ m_out.branch(
+ isCell(rightValue, provenType(rightEdge)),
+ unsure(isCellCase), unsure(continuation));
LBasicBlock lastNext = m_out.appendTo(isCellCase, isStringCase);
ValueFromBlock notStringResult = m_out.anchor(m_out.booleanFalse);
- m_out.branch(isString(rightValue), unsure(isStringCase), unsure(continuation));
+ m_out.branch(
+ isString(rightValue, provenType(rightEdge)),
+ unsure(isStringCase), unsure(continuation));
m_out.appendTo(isStringCase, continuation);
LValue right = m_out.loadPtr(rightValue, m_heaps.JSString_value);
return;
}
- LOWERING_FAILED(m_node, "Bad use kinds");
+ DFG_CRASH(m_graph, m_node, "Bad use kinds");
}
void compileCompareStrictEqConstant()
{
- JSValue constant = m_graph.valueOfJSConstant(m_node->child2().node());
+ JSValue constant = m_node->child2()->asJSValue();
setBoolean(
m_out.equal(
{
setBoolean(m_out.bitNot(boolify(m_node->child1())));
}
-
+#if ENABLE(FTL_NATIVE_CALL_INLINING)
+ void compileNativeCallOrConstruct()
+ {
+ int numPassedArgs = m_node->numChildren() - 1;
+ int numArgs = numPassedArgs;
+
+ JSFunction* knownFunction = m_node->castOperand<JSFunction*>();
+ NativeFunction function = knownFunction->nativeFunction();
+
+ Dl_info info;
+ if (!dladdr((void*)function, &info))
+ ASSERT(false); // if we couldn't find the native function this doesn't bode well.
+
+ LValue callee = getFunctionBySymbol(info.dli_sname);
+
+ bool notInlinable;
+ if ((notInlinable = !callee))
+ callee = m_out.operation(function);
+
+ m_out.storePtr(m_callFrame, m_execStorage, m_heaps.CallFrame_callerFrame);
+ m_out.storePtr(constNull(m_out.intPtr), addressFor(m_execStorage, JSStack::CodeBlock));
+ m_out.storePtr(weakPointer(knownFunction), addressFor(m_execStorage, JSStack::Callee));
+
+ m_out.store64(m_out.constInt64(numArgs), addressFor(m_execStorage, JSStack::ArgumentCount));
+
+ for (int i = 0; i < numPassedArgs; ++i) {
+ m_out.storePtr(lowJSValue(m_graph.varArgChild(m_node, 1 + i)),
+ addressFor(m_execStorage, JSStack::ThisArgument, i * sizeof(Register)));
+ }
+
+ LValue calleeCallFrame = m_out.address(m_execState, m_heaps.CallFrame_callerFrame).value();
+ m_out.storePtr(m_out.ptrToInt(calleeCallFrame, m_out.intPtr), m_out.absolute(&vm().topCallFrame));
+
+ LType typeCalleeArg;
+ getParamTypes(getElementType(typeOf(callee)), &typeCalleeArg);
+
+ LValue argument = notInlinable
+ ? m_out.ptrToInt(calleeCallFrame, typeCalleeArg)
+ : m_out.bitCast(calleeCallFrame, typeCalleeArg);
+ LValue call = vmCall(callee, argument);
+
+ if (verboseCompilationEnabled())
+ dataLog("Native calling: ", info.dli_sname, "\n");
+
+ setJSValue(call);
+ }
+#endif
+
void compileCallOrConstruct()
{
- int dummyThisArgument = m_node->op() == Call ? 0 : 1;
int numPassedArgs = m_node->numChildren() - 1;
- int numArgs = numPassedArgs + dummyThisArgument;
-
- LValue callee = lowJSValue(m_graph.varArgChild(m_node, 0));
-
+ int numArgs = numPassedArgs;
+
+ LValue jsCallee = lowJSValue(m_graph.varArgChild(m_node, 0));
+
unsigned stackmapID = m_stackmapIDs++;
Vector<LValue> arguments;
arguments.append(m_out.constInt32(sizeOfCall()));
arguments.append(constNull(m_out.ref8));
arguments.append(m_out.constInt32(1 + JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize + numArgs));
- arguments.append(callee); // callee -> %rax
+ arguments.append(jsCallee); // callee -> %rax
arguments.append(getUndef(m_out.int64)); // code block
- arguments.append(getUndef(m_out.int64)); // scope chain
- arguments.append(callee); // callee -> stack
+ arguments.append(jsCallee); // callee -> stack
arguments.append(m_out.constInt64(numArgs)); // argument count and zeros for the tag
- if (dummyThisArgument)
- arguments.append(getUndef(m_out.int64));
for (int i = 0; i < numPassedArgs; ++i)
arguments.append(lowJSValue(m_graph.varArgChild(m_node, 1 + i)));
setJSValue(call);
}
+ void compileCallOrConstructVarargs()
+ {
+ LValue jsCallee = lowJSValue(m_node->child1());
+ LValue thisArg = lowJSValue(m_node->child3());
+
+ LValue jsArguments = nullptr;
+
+ switch (m_node->op()) {
+ case CallVarargs:
+ case ConstructVarargs:
+ jsArguments = lowJSValue(m_node->child2());
+ break;
+ case CallForwardVarargs:
+ case ConstructForwardVarargs:
+ break;
+ default:
+ DFG_CRASH(m_graph, m_node, "bad node type");
+ break;
+ }
+
+ unsigned stackmapID = m_stackmapIDs++;
+
+ Vector<LValue> arguments;
+ arguments.append(m_out.constInt64(stackmapID));
+ arguments.append(m_out.constInt32(sizeOfICFor(m_node)));
+ arguments.append(constNull(m_out.ref8));
+ arguments.append(m_out.constInt32(2 + !!jsArguments));
+ arguments.append(jsCallee);
+ if (jsArguments)
+ arguments.append(jsArguments);
+ ASSERT(thisArg);
+ arguments.append(thisArg);
+
+ callPreflight();
+
+ LValue call = m_out.call(m_out.patchpointInt64Intrinsic(), arguments);
+ setInstructionCallingConvention(call, LLVMCCallConv);
+
+ m_ftlState.jsCallVarargses.append(JSCallVarargs(stackmapID, m_node));
+
+ setJSValue(call);
+ }
+
+ void compileLoadVarargs()
+ {
+ LoadVarargsData* data = m_node->loadVarargsData();
+ LValue jsArguments = lowJSValue(m_node->child1());
+
+ LValue length = vmCall(
+ m_out.operation(operationSizeOfVarargs), m_callFrame, jsArguments,
+ m_out.constInt32(data->offset));
+
+ // FIXME: There is a chance that we will call an effectful length property twice. This is safe
+ // from the standpoint of the VM's integrity, but it's subtly wrong from a spec compliance
+ // standpoint. The best solution would be one where we can exit *into* the op_call_varargs right
+ // past the sizing.
+ // https://bugs.webkit.org/show_bug.cgi?id=141448
+
+ LValue lengthIncludingThis = m_out.add(length, m_out.int32One);
+ speculate(
+ VarargsOverflow, noValue(), nullptr,
+ m_out.above(lengthIncludingThis, m_out.constInt32(data->limit)));
+
+ m_out.store32(lengthIncludingThis, payloadFor(data->machineCount));
+
+ // FIXME: This computation is rather silly. If operationLoadVarargs just took a pointer instead
+ // of a VirtualRegister, we wouldn't have to do this.
+ // https://bugs.webkit.org/show_bug.cgi?id=141660
+ LValue machineStart = m_out.lShr(
+ m_out.sub(addressFor(data->machineStart.offset()).value(), m_callFrame),
+ m_out.constIntPtr(3));
+
+ vmCall(
+ m_out.operation(operationLoadVarargs), m_callFrame,
+ m_out.castToInt32(machineStart), jsArguments, m_out.constInt32(data->offset),
+ length, m_out.constInt32(data->mandatoryMinimum));
+ }
+
+ void compileForwardVarargs()
+ {
+ LoadVarargsData* data = m_node->loadVarargsData();
+ InlineCallFrame* inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame;
+
+ LValue length = getArgumentsLength(inlineCallFrame).value;
+ LValue lengthIncludingThis = m_out.add(length, m_out.constInt32(1 - data->offset));
+
+ speculate(
+ VarargsOverflow, noValue(), nullptr,
+ m_out.above(lengthIncludingThis, m_out.constInt32(data->limit)));
+
+ m_out.store32(lengthIncludingThis, payloadFor(data->machineCount));
+
+ LValue sourceStart = getArgumentsStart(inlineCallFrame);
+ LValue targetStart = addressFor(data->machineStart).value();
+
+ LBasicBlock undefinedLoop = FTL_NEW_BLOCK(m_out, ("ForwardVarargs undefined loop body"));
+ LBasicBlock mainLoopEntry = FTL_NEW_BLOCK(m_out, ("ForwardVarargs main loop entry"));
+ LBasicBlock mainLoop = FTL_NEW_BLOCK(m_out, ("ForwardVarargs main loop body"));
+ LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("ForwardVarargs continuation"));
+
+ LValue lengthAsPtr = m_out.zeroExtPtr(length);
+ ValueFromBlock loopBound = m_out.anchor(m_out.constIntPtr(data->mandatoryMinimum));
+ m_out.branch(
+ m_out.above(loopBound.value(), lengthAsPtr), unsure(undefinedLoop), unsure(mainLoopEntry));
+
+ LBasicBlock lastNext = m_out.appendTo(undefinedLoop, mainLoopEntry);
+ LValue previousIndex = m_out.phi(m_out.intPtr, loopBound);
+ LValue currentIndex = m_out.sub(previousIndex, m_out.intPtrOne);
+ m_out.store64(
+ m_out.constInt64(JSValue::encode(jsUndefined())),
+ m_out.baseIndex(m_heaps.variables, targetStart, currentIndex));
+ ValueFromBlock nextIndex = m_out.anchor(currentIndex);
+ addIncoming(previousIndex, nextIndex);
+ m_out.branch(
+ m_out.above(currentIndex, lengthAsPtr), unsure(undefinedLoop), unsure(mainLoopEntry));
+
+ m_out.appendTo(mainLoopEntry, mainLoop);
+ loopBound = m_out.anchor(lengthAsPtr);
+ m_out.branch(m_out.notNull(loopBound.value()), unsure(mainLoop), unsure(continuation));
+
+ m_out.appendTo(mainLoop, continuation);
+ previousIndex = m_out.phi(m_out.intPtr, loopBound);
+ currentIndex = m_out.sub(previousIndex, m_out.intPtrOne);
+ LValue value = m_out.load64(
+ m_out.baseIndex(
+ m_heaps.variables, sourceStart,
+ m_out.add(currentIndex, m_out.constIntPtr(data->offset))));
+ m_out.store64(value, m_out.baseIndex(m_heaps.variables, targetStart, currentIndex));
+ nextIndex = m_out.anchor(currentIndex);
+ addIncoming(previousIndex, nextIndex);
+ m_out.branch(m_out.isNull(currentIndex), unsure(continuation), unsure(mainLoop));
+
+ m_out.appendTo(continuation, lastNext);
+ }
+
void compileJump()
{
m_out.jump(lowBlock(m_node->targetBlock()));
m_out.appendTo(isNotInt, isDouble);
m_out.branch(
- isCellOrMisc(boxedValue),
+ isCellOrMisc(boxedValue, provenType(m_node->child1())),
usually(lowBlock(data->fallThrough.block)), rarely(isDouble));
m_out.appendTo(isDouble, innerLastNext);
}
default:
- LOWERING_FAILED(m_node, "Bad use kind");
- return;
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
+ break;
}
m_out.appendTo(switchOnInts, lastNext);
LBasicBlock isStringCase = FTL_NEW_BLOCK(m_out, ("Switch/SwitchChar is string"));
m_out.branch(
- isNotCell(unboxedValue),
+ isNotCell(unboxedValue, provenType(m_node->child1())),
unsure(lowBlock(data->fallThrough.block)), unsure(isCellCase));
LBasicBlock lastNext = m_out.appendTo(isCellCase, isStringCase);
LValue cellValue = unboxedValue;
m_out.branch(
- isNotString(cellValue),
+ isNotString(cellValue, provenType(m_node->child1())),
unsure(lowBlock(data->fallThrough.block)), unsure(isStringCase));
m_out.appendTo(isStringCase, lastNext);
}
default:
- LOWERING_FAILED(m_node, "Bad use kind");
- return;
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
+ break;
}
LBasicBlock lengthIs1 = FTL_NEW_BLOCK(m_out, ("Switch/SwitchChar length is 1"));
return;
}
- case SwitchString:
- LOWERING_FAILED(m_node, "Unimplemented");
- break;
+ case SwitchString: {
+ switch (m_node->child1().useKind()) {
+ case StringIdentUse: {
+ LValue stringImpl = lowStringIdent(m_node->child1());
+
+ Vector<SwitchCase> cases;
+ for (unsigned i = 0; i < data->cases.size(); ++i) {
+ LValue value = m_out.constIntPtr(data->cases[i].value.stringImpl());
+ LBasicBlock block = lowBlock(data->cases[i].target.block);
+ Weight weight = Weight(data->cases[i].target.count);
+ cases.append(SwitchCase(value, block, weight));
+ }
+
+ m_out.switchInstruction(
+ stringImpl, cases, lowBlock(data->fallThrough.block),
+ Weight(data->fallThrough.count));
+ return;
+ }
+
+ case StringUse: {
+ switchString(data, lowString(m_node->child1()));
+ return;
+ }
+
+ case UntypedUse: {
+ LValue value = lowJSValue(m_node->child1());
+
+ LBasicBlock isCellBlock = FTL_NEW_BLOCK(m_out, ("Switch/SwitchString Untyped cell case"));
+ LBasicBlock isStringBlock = FTL_NEW_BLOCK(m_out, ("Switch/SwitchString Untyped string case"));
+
+ m_out.branch(
+ isCell(value, provenType(m_node->child1())),
+ unsure(isCellBlock), unsure(lowBlock(data->fallThrough.block)));
+
+ LBasicBlock lastNext = m_out.appendTo(isCellBlock, isStringBlock);
+
+ m_out.branch(
+ isString(value, provenType(m_node->child1())),
+ unsure(isStringBlock), unsure(lowBlock(data->fallThrough.block)));
+
+ m_out.appendTo(isStringBlock, lastNext);
+
+ switchString(data, value);
+ return;
+ }
+
+ default:
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
+ return;
+ }
+ return;
}
+
+ case SwitchCell: {
+ LValue cell;
+ switch (m_node->child1().useKind()) {
+ case CellUse: {
+ cell = lowCell(m_node->child1());
+ break;
+ }
+
+ case UntypedUse: {
+ LValue value = lowJSValue(m_node->child1());
+ LBasicBlock cellCase = FTL_NEW_BLOCK(m_out, ("Switch/SwitchCell cell case"));
+ m_out.branch(
+ isCell(value, provenType(m_node->child1())),
+ unsure(cellCase), unsure(lowBlock(data->fallThrough.block)));
+ m_out.appendTo(cellCase);
+ cell = value;
+ break;
+ }
+
+ default:
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
+ return;
+ }
+
+ buildSwitch(m_node->switchData(), m_out.intPtr, cell);
+ return;
+ } }
- LOWERING_FAILED(m_node, "Bad switch kind");
+ DFG_CRASH(m_graph, m_node, "Bad switch kind");
}
void compileReturn()
void compileInvalidationPoint()
{
if (verboseCompilationEnabled())
- dataLog(" Invalidation point with availability: ", m_availability, "\n");
+ dataLog(" Invalidation point with availability: ", availabilityMap(), "\n");
m_ftlState.jitCode->osrExit.append(OSRExit(
UncountableInvalidation, InvalidValueFormat, MethodOfGettingAValueProfile(),
m_codeOriginForExitTarget, m_codeOriginForExitProfile,
- m_availability.numberOfArguments(), m_availability.numberOfLocals()));
+ availabilityMap().m_locals.numberOfArguments(),
+ availabilityMap().m_locals.numberOfLocals()));
m_ftlState.finalizer->osrExit.append(OSRExitCompilationInfo());
OSRExit& exit = m_ftlState.jitCode->osrExit.last();
info.m_isInvalidationPoint = true;
}
- void compileCheckArgumentsNotCreated()
- {
- ASSERT(!isEmptySpeculation(
- m_state.variables().operand(
- m_graph.argumentsRegisterFor(m_node->origin.semantic)).m_type));
-
- checkArgumentsNotCreated();
- }
-
void compileIsUndefined()
{
setBoolean(equalNullOrUndefined(m_node->child1(), AllCellsAreFalse, EqualUndefined));
void compileIsBoolean()
{
- setBoolean(isBoolean(lowJSValue(m_node->child1())));
+ // Pass the abstract interpreter's proven type so isBoolean() can elide
+ // checks already discharged statically, emitting less dead IR for LLVM.
+ setBoolean(isBoolean(lowJSValue(m_node->child1()), provenType(m_node->child1())));
}
void compileIsNumber()
{
- setBoolean(isNumber(lowJSValue(m_node->child1())));
+ // As with IsBoolean: hand the proven type to isNumber() so statically
+ // known facts prune the emitted type check.
+ setBoolean(isNumber(lowJSValue(m_node->child1()), provenType(m_node->child1())));
}
void compileIsString()
LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("IsString continuation"));
ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
- m_out.branch(isCell(value), unsure(isCellCase), unsure(continuation));
+ m_out.branch(
+ isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(continuation));
LBasicBlock lastNext = m_out.appendTo(isCellCase, continuation);
- ValueFromBlock cellResult = m_out.anchor(isString(value));
+ ValueFromBlock cellResult = m_out.anchor(isString(value, provenType(m_node->child1())));
m_out.jump(continuation);
m_out.appendTo(continuation, lastNext);
setBoolean(m_out.phi(m_out.boolean, notCellResult, cellResult));
}
-
+
void compileIsObject()
{
- LValue pointerResult = vmCall(
- m_out.operation(operationIsObject), m_callFrame, lowJSValue(m_node->child1()));
- setBoolean(m_out.notNull(pointerResult));
+ // Inline replacement for the old operationIsObject vmCall: non-cells are
+ // always false; cells get an inline isObject() test. Both answers merge
+ // at 'continuation' through a boolean phi.
+ LValue value = lowJSValue(m_node->child1());
+
+ LBasicBlock isCellCase = FTL_NEW_BLOCK(m_out, ("IsObject cell case"));
+ LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("IsObject continuation"));
+
+ ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
+ m_out.branch(
+ isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(continuation));
+
+ LBasicBlock lastNext = m_out.appendTo(isCellCase, continuation);
+ ValueFromBlock cellResult = m_out.anchor(isObject(value, provenType(m_node->child1())));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setBoolean(m_out.phi(m_out.boolean, notCellResult, cellResult));
+ }
+
+ void compileIsObjectOrNull()
+ {
+ // Computes the IsObjectOrNull predicate: functions -> false, plain
+ // objects -> true, null -> true, all other non-cells -> false. Cells
+ // whose type is "exotic for typeof" take the rare slow path through
+ // operationObjectIsObject to decide (it returns a non-null pointer for
+ // a true answer).
+ JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
+
+ Edge child = m_node->child1();
+ LValue value = lowJSValue(child);
+
+ LBasicBlock cellCase = FTL_NEW_BLOCK(m_out, ("IsObjectOrNull cell case"));
+ LBasicBlock notFunctionCase = FTL_NEW_BLOCK(m_out, ("IsObjectOrNull not function case"));
+ LBasicBlock objectCase = FTL_NEW_BLOCK(m_out, ("IsObjectOrNull object case"));
+ LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("IsObjectOrNull slow path"));
+ LBasicBlock notCellCase = FTL_NEW_BLOCK(m_out, ("IsObjectOrNull not cell case"));
+ LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("IsObjectOrNull continuation"));
+
+ m_out.branch(isCell(value, provenType(child)), unsure(cellCase), unsure(notCellCase));
+
+ LBasicBlock lastNext = m_out.appendTo(cellCase, notFunctionCase);
+ // Functions answer false immediately.
+ ValueFromBlock isFunctionResult = m_out.anchor(m_out.booleanFalse);
+ m_out.branch(
+ isFunction(value, provenType(child)),
+ unsure(continuation), unsure(notFunctionCase));
+
+ m_out.appendTo(notFunctionCase, objectCase);
+ // Non-function, non-object cells (e.g. strings) answer false.
+ ValueFromBlock notObjectResult = m_out.anchor(m_out.booleanFalse);
+ m_out.branch(
+ isObject(value, provenType(child)),
+ unsure(objectCase), unsure(continuation));
+
+ m_out.appendTo(objectCase, slowPath);
+ ValueFromBlock objectResult = m_out.anchor(m_out.booleanTrue);
+ m_out.branch(
+ isExoticForTypeof(value, provenType(child)),
+ rarely(slowPath), usually(continuation));
+
+ m_out.appendTo(slowPath, notCellCase);
+ LValue slowResultValue = vmCall(
+ m_out.operation(operationObjectIsObject), m_callFrame, weakPointer(globalObject),
+ value);
+ ValueFromBlock slowResult = m_out.anchor(m_out.notNull(slowResultValue));
+ m_out.jump(continuation);
+
+ m_out.appendTo(notCellCase, continuation);
+ // Of the non-cells, only null itself counts as "object or null".
+ LValue notCellResultValue = m_out.equal(value, m_out.constInt64(JSValue::encode(jsNull())));
+ ValueFromBlock notCellResult = m_out.anchor(notCellResultValue);
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ LValue result = m_out.phi(
+ m_out.boolean,
+ isFunctionResult, notObjectResult, objectResult, slowResult, notCellResult);
+ setBoolean(result);
}
void compileIsFunction()
{
- LValue pointerResult = vmCall(
- m_out.operation(operationIsFunction), lowJSValue(m_node->child1()));
- setBoolean(m_out.notNull(pointerResult));
+ // Inline replacement for the old operationIsFunction vmCall:
+ // non-cells -> false, function cells -> true, other cells -> false
+ // unless "exotic for typeof", in which case operationObjectIsFunction
+ // decides on the rare slow path (non-null pointer means true).
+ JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
+
+ Edge child = m_node->child1();
+ LValue value = lowJSValue(child);
+
+ LBasicBlock cellCase = FTL_NEW_BLOCK(m_out, ("IsFunction cell case"));
+ LBasicBlock notFunctionCase = FTL_NEW_BLOCK(m_out, ("IsFunction not function case"));
+ LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("IsFunction slow path"));
+ LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("IsFunction continuation"));
+
+ ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
+ m_out.branch(
+ isCell(value, provenType(child)), unsure(cellCase), unsure(continuation));
+
+ LBasicBlock lastNext = m_out.appendTo(cellCase, notFunctionCase);
+ ValueFromBlock functionResult = m_out.anchor(m_out.booleanTrue);
+ m_out.branch(
+ isFunction(value, provenType(child)),
+ unsure(continuation), unsure(notFunctionCase));
+
+ m_out.appendTo(notFunctionCase, slowPath);
+ ValueFromBlock objectResult = m_out.anchor(m_out.booleanFalse);
+ m_out.branch(
+ isExoticForTypeof(value, provenType(child)),
+ rarely(slowPath), usually(continuation));
+
+ m_out.appendTo(slowPath, continuation);
+ LValue slowResultValue = vmCall(
+ m_out.operation(operationObjectIsFunction), m_callFrame, weakPointer(globalObject),
+ value);
+ ValueFromBlock slowResult = m_out.anchor(m_out.notNull(slowResultValue));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ LValue result = m_out.phi(
+ m_out.boolean, notCellResult, functionResult, objectResult, slowResult);
+ setBoolean(result);
}
+ void compileTypeOf()
+ {
+ // Lowers TypeOf by letting buildTypeOf() emit one basic block per
+ // possible TypeofType; each case anchors the corresponding interned
+ // small-strings answer and jumps to the shared continuation, where a
+ // phi selects the result string.
+ Edge child = m_node->child1();
+ LValue value = lowJSValue(child);
+
+ LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("TypeOf continuation"));
+ LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
+
+ Vector<ValueFromBlock> results;
+
+ buildTypeOf(
+ child, value,
+ [&] (TypeofType type) {
+ results.append(m_out.anchor(weakPointer(vm().smallStrings.typeString(type))));
+ m_out.jump(continuation);
+ });
+
+ m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(m_out.int64, results));
+ }
+
+ void compileIn()
+ {
+ // Lowers the 'in' operator. The base (child2) must be an object. When
+ // the property name (child1) is a compile-time-constant atomic string,
+ // emit an anyreg patchpoint and record a CheckInDescriptor so the
+ // stackmap can be patched into an inline cache later; otherwise fall
+ // back to the fully generic operationGenericIn call.
+ Edge base = m_node->child2();
+ LValue cell = lowCell(base);
+ speculateObject(base, cell);
+ if (JSString* string = m_node->child1()->dynamicCastConstant<JSString*>()) {
+ if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
+
+ const auto str = static_cast<const AtomicStringImpl*>(string->tryGetValueImpl());
+ unsigned stackmapID = m_stackmapIDs++;
+
+ // Reserve sizeOfIn() bytes of patchable code at this site.
+ LValue call = m_out.call(
+ m_out.patchpointInt64Intrinsic(),
+ m_out.constInt64(stackmapID), m_out.constInt32(sizeOfIn()),
+ constNull(m_out.ref8), m_out.constInt32(1), cell);
+
+ setInstructionCallingConvention(call, LLVMAnyRegCallConv);
+
+ m_ftlState.checkIns.append(CheckInDescriptor(stackmapID, m_node->origin.semantic, str));
+ setJSValue(call);
+ return;
+ }
+ }
+
+ setJSValue(vmCall(m_out.operation(operationGenericIn), m_callFrame, cell, lowJSValue(m_node->child1())));
+ }
+
void compileCheckHasInstance()
{
speculate(
LValue condition;
if (m_node->child1().useKind() == UntypedUse)
- condition = isCell(cell);
+ condition = isCell(cell, provenType(m_node->child1()));
else
condition = m_out.booleanTrue;
LBasicBlock lastNext = m_out.appendTo(isCellCase, loop);
- speculate(BadType, noValue(), 0, isNotObject(prototype));
+ speculate(BadType, noValue(), 0, isNotObject(prototype, provenType(m_node->child2())));
ValueFromBlock originalValue = m_out.anchor(cell);
m_out.jump(loop);
emitStoreBarrier(lowCell(m_node->child1()));
}
- void compileStoreBarrierWithNullCheck()
+ void compileHasIndexedProperty()
{
-#if ENABLE(GGC)
- LBasicBlock isNotNull = FTL_NEW_BLOCK(m_out, ("Store barrier with null check value not null"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("Store barrier continuation"));
+ // Lowers HasIndexedProperty for the profiled array shape. In-bounds
+ // loads test for a hole inline; out-of-bounds indices (when the array
+ // mode is not proven in-bounds) and holes fall back to
+ // operationHasIndexedProperty.
+ switch (m_node->arrayMode().type()) {
+ case Array::Int32:
+ case Array::Contiguous: {
+ LValue base = lowCell(m_node->child1());
+ LValue index = lowInt32(m_node->child2());
+ LValue storage = lowStorage(m_node->child3());
+
+ IndexedAbstractHeap& heap = m_node->arrayMode().type() == Array::Int32 ?
+ m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties;
+
+ LBasicBlock checkHole = FTL_NEW_BLOCK(m_out, ("HasIndexedProperty int/contiguous check hole"));
+ LBasicBlock slowCase = FTL_NEW_BLOCK(m_out, ("HasIndexedProperty int/contiguous slow case"));
+ LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("HasIndexedProperty int/contiguous continuation"));
+
+ if (!m_node->arrayMode().isInBounds()) {
+ m_out.branch(
+ m_out.aboveOrEqual(
+ index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)),
+ rarely(slowCase), usually(checkHole));
+ } else
+ m_out.jump(checkHole);
+
+ LBasicBlock lastNext = m_out.appendTo(checkHole, slowCase);
+ // A hole in Int32/Contiguous storage is the all-zero empty JSValue,
+ // so any non-zero load means the property is present.
+ ValueFromBlock checkHoleResult = m_out.anchor(
+ m_out.notZero64(m_out.load64(baseIndex(heap, storage, index, m_node->child2()))));
+ m_out.branch(checkHoleResult.value(), usually(continuation), rarely(slowCase));
+
+ m_out.appendTo(slowCase, continuation);
+ ValueFromBlock slowResult = m_out.anchor(m_out.equal(
+ m_out.constInt64(JSValue::encode(jsBoolean(true))),
+ vmCall(m_out.operation(operationHasIndexedProperty), m_callFrame, base, index)));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setBoolean(m_out.phi(m_out.boolean, checkHoleResult, slowResult));
+ return;
+ }
+ case Array::Double: {
+ LValue base = lowCell(m_node->child1());
+ LValue index = lowInt32(m_node->child2());
+ LValue storage = lowStorage(m_node->child3());
+
+ IndexedAbstractHeap& heap = m_heaps.indexedDoubleProperties;
+
+ LBasicBlock checkHole = FTL_NEW_BLOCK(m_out, ("HasIndexedProperty double check hole"));
+ LBasicBlock slowCase = FTL_NEW_BLOCK(m_out, ("HasIndexedProperty double slow case"));
+ LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("HasIndexedProperty double continuation"));
+
+ if (!m_node->arrayMode().isInBounds()) {
+ m_out.branch(
+ m_out.aboveOrEqual(
+ index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)),
+ rarely(slowCase), usually(checkHole));
+ } else
+ m_out.jump(checkHole);
+ LBasicBlock lastNext = m_out.appendTo(checkHole, slowCase);
+ // Double storage marks holes with NaN; x == x is false only for NaN.
+ LValue doubleValue = m_out.loadDouble(baseIndex(heap, storage, index, m_node->child2()));
+ ValueFromBlock checkHoleResult = m_out.anchor(m_out.doubleEqual(doubleValue, doubleValue));
+ m_out.branch(checkHoleResult.value(), usually(continuation), rarely(slowCase));
+
+ m_out.appendTo(slowCase, continuation);
+ ValueFromBlock slowResult = m_out.anchor(m_out.equal(
+ m_out.constInt64(JSValue::encode(jsBoolean(true))),
+ vmCall(m_out.operation(operationHasIndexedProperty), m_callFrame, base, index)));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setBoolean(m_out.phi(m_out.boolean, checkHoleResult, slowResult));
+ return;
+ }
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return;
+ }
+ }
+
+ void compileHasGenericProperty()
+ {
+ // Fully generic property-existence check: no fast path, just defer to
+ // the runtime.
LValue base = lowJSValue(m_node->child1());
- m_out.branch(m_out.isZero64(base), unsure(continuation), unsure(isNotNull));
- LBasicBlock lastNext = m_out.appendTo(isNotNull, continuation);
- emitStoreBarrier(base);
+ LValue property = lowCell(m_node->child2());
+ setJSValue(vmCall(m_out.operation(operationHasGenericProperty), m_callFrame, base, property));
+ }
+
+ void compileHasStructureProperty()
+ {
+ // Fast path: if the base still has the structure the enumerator was
+ // built from (structureID matches the enumerator's cached one), the
+ // enumerated property is present — answer true with no lookup.
+ // Otherwise do a generic runtime lookup and compare against true.
+ LValue base = lowJSValue(m_node->child1());
+ LValue property = lowString(m_node->child2());
+ LValue enumerator = lowCell(m_node->child3());
+
+ LBasicBlock correctStructure = FTL_NEW_BLOCK(m_out, ("HasStructureProperty correct structure"));
+ LBasicBlock wrongStructure = FTL_NEW_BLOCK(m_out, ("HasStructureProperty wrong structure"));
+ LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("HasStructureProperty continuation"));
+
+ m_out.branch(m_out.notEqual(
+ m_out.load32(base, m_heaps.JSCell_structureID),
+ m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_cachedStructureID)),
+ rarely(wrongStructure), usually(correctStructure));
+
+ LBasicBlock lastNext = m_out.appendTo(correctStructure, wrongStructure);
+ ValueFromBlock correctStructureResult = m_out.anchor(m_out.booleanTrue);
+ m_out.jump(continuation);
+
+ m_out.appendTo(wrongStructure, continuation);
+ ValueFromBlock wrongStructureResult = m_out.anchor(
+ m_out.equal(
+ m_out.constInt64(JSValue::encode(jsBoolean(true))),
+ vmCall(m_out.operation(operationHasGenericProperty), m_callFrame, base, property)));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setBoolean(m_out.phi(m_out.boolean, correctStructureResult, wrongStructureResult));
+ }
+
+ void compileGetDirectPname()
+ {
+ // Loads the value of an enumerated own property directly from the
+ // object's storage, guarded by a structure check against the
+ // enumerator's cached structure. Indices below the cached inline
+ // capacity read inline storage; the rest read the butterfly's
+ // out-of-line storage. Structure mismatch falls back to a generic
+ // operationGetByVal.
+ LValue base = lowCell(m_graph.varArgChild(m_node, 0));
+ LValue property = lowCell(m_graph.varArgChild(m_node, 1));
+ LValue index = lowInt32(m_graph.varArgChild(m_node, 2));
+ LValue enumerator = lowCell(m_graph.varArgChild(m_node, 3));
+
+ LBasicBlock checkOffset = FTL_NEW_BLOCK(m_out, ("GetDirectPname check offset"));
+ LBasicBlock inlineLoad = FTL_NEW_BLOCK(m_out, ("GetDirectPname inline load"));
+ LBasicBlock outOfLineLoad = FTL_NEW_BLOCK(m_out, ("GetDirectPname out-of-line load"));
+ LBasicBlock slowCase = FTL_NEW_BLOCK(m_out, ("GetDirectPname slow case"));
+ LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("GetDirectPname continuation"));
+
+ m_out.branch(m_out.notEqual(
+ m_out.load32(base, m_heaps.JSCell_structureID),
+ m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_cachedStructureID)),
+ rarely(slowCase), usually(checkOffset));
+
+ LBasicBlock lastNext = m_out.appendTo(checkOffset, inlineLoad);
+ m_out.branch(m_out.aboveOrEqual(index, m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_cachedInlineCapacity)),
+ unsure(outOfLineLoad), unsure(inlineLoad));
+
+ m_out.appendTo(inlineLoad, outOfLineLoad);
+ ValueFromBlock inlineResult = m_out.anchor(
+ m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(),
+ base, m_out.zeroExt(index, m_out.int64), ScaleEight, JSObject::offsetOfInlineStorage())));
+ m_out.jump(continuation);
+
+ m_out.appendTo(outOfLineLoad, slowCase);
+ LValue storage = m_out.loadPtr(base, m_heaps.JSObject_butterfly);
+ // realIndex = -(index - inlineCapacity): out-of-line properties are
+ // addressed at negative offsets from the butterfly (cf.
+ // firstOutOfLineOffset below).
+ LValue realIndex = m_out.signExt(
+ m_out.neg(m_out.sub(index, m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_cachedInlineCapacity))),
+ m_out.int64);
+ int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue);
+ ValueFromBlock outOfLineResult = m_out.anchor(
+ m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(), storage, realIndex, ScaleEight, offsetOfFirstProperty)));
+ m_out.jump(continuation);
+
+ m_out.appendTo(slowCase, continuation);
+ ValueFromBlock slowCaseResult = m_out.anchor(
+ vmCall(m_out.operation(operationGetByVal), m_callFrame, base, property));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(m_out.int64, inlineResult, outOfLineResult, slowCaseResult));
+ }
+
+ void compileGetEnumerableLength()
+ {
+ // The enumerable length is cached on the enumerator; just load it.
+ LValue enumerator = lowCell(m_node->child1());
+ setInt32(m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_indexLength));
+ }
+
+ void compileGetPropertyEnumerator()
+ {
+ // Building an enumerator is always a runtime call.
+ LValue base = lowCell(m_node->child1());
+ setJSValue(vmCall(m_out.operation(operationGetPropertyEnumerator), m_callFrame, base));
+ }
+
+ void compileGetEnumeratorStructurePname()
+ {
+ // Fetches the index'th structure-property name from the enumerator's
+ // cached names vector; indices at or past endStructurePropertyIndex
+ // yield the null JSValue.
+ LValue enumerator = lowCell(m_node->child1());
+ LValue index = lowInt32(m_node->child2());
+
+ LBasicBlock inBounds = FTL_NEW_BLOCK(m_out, ("GetEnumeratorStructurePname in bounds"));
+ LBasicBlock outOfBounds = FTL_NEW_BLOCK(m_out, ("GetEnumeratorStructurePname out of bounds"));
+ LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("GetEnumeratorStructurePname continuation"));
+
+ m_out.branch(m_out.below(index, m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_endStructurePropertyIndex)),
+ usually(inBounds), rarely(outOfBounds));
+
+ LBasicBlock lastNext = m_out.appendTo(inBounds, outOfBounds);
+ LValue storage = m_out.loadPtr(enumerator, m_heaps.JSPropertyNameEnumerator_cachedPropertyNamesVector);
+ ValueFromBlock inBoundsResult = m_out.anchor(
+ m_out.loadPtr(m_out.baseIndex(m_heaps.JSPropertyNameEnumerator_cachedPropertyNamesVectorContents, storage, m_out.zeroExtPtr(index))));
+ m_out.jump(continuation);
+
+ m_out.appendTo(outOfBounds, continuation);
+ ValueFromBlock outOfBoundsResult = m_out.anchor(m_out.constInt64(ValueNull));
+ m_out.jump(continuation);
+
m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(m_out.int64, inBoundsResult, outOfBoundsResult));
+ }
+
+ void compileGetEnumeratorGenericPname()
+ {
+ // Same shape as GetEnumeratorStructurePname, but bounded by
+ // endGenericPropertyIndex: out-of-range indices yield the null JSValue.
+ LValue enumerator = lowCell(m_node->child1());
+ LValue index = lowInt32(m_node->child2());
+
+ LBasicBlock inBounds = FTL_NEW_BLOCK(m_out, ("GetEnumeratorGenericPname in bounds"));
+ LBasicBlock outOfBounds = FTL_NEW_BLOCK(m_out, ("GetEnumeratorGenericPname out of bounds"));
+ LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("GetEnumeratorGenericPname continuation"));
+
+ m_out.branch(m_out.below(index, m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_endGenericPropertyIndex)),
+ usually(inBounds), rarely(outOfBounds));
+
+ LBasicBlock lastNext = m_out.appendTo(inBounds, outOfBounds);
+ LValue storage = m_out.loadPtr(enumerator, m_heaps.JSPropertyNameEnumerator_cachedPropertyNamesVector);
+ ValueFromBlock inBoundsResult = m_out.anchor(
+ m_out.loadPtr(m_out.baseIndex(m_heaps.JSPropertyNameEnumerator_cachedPropertyNamesVectorContents, storage, m_out.zeroExtPtr(index))));
+ m_out.jump(continuation);
+
+ m_out.appendTo(outOfBounds, continuation);
+ ValueFromBlock outOfBoundsResult = m_out.anchor(m_out.constInt64(ValueNull));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(m_out.int64, inBoundsResult, outOfBoundsResult));
+ }
+
+ void compileToIndexString()
+ {
+ // Converting an index to its string form is a runtime call.
+ LValue index = lowInt32(m_node->child1());
+ setJSValue(vmCall(m_out.operation(operationToIndexString), m_callFrame, index));
+ }
+
+ void compileCheckStructureImmediate()
+ {
+ // Like CheckStructure, but child1 is the Structure itself (as a cell),
+ // so each expected structure is compared as a weak pointer constant.
+ LValue structure = lowCell(m_node->child1());
+ checkStructure(
+ structure, noValue(), BadCache, m_node->structureSet(),
+ [this] (Structure* structure) {
+ return weakStructure(structure);
+ });
+ }
+
+ void compileMaterializeNewObject()
+ {
+ // Materializes an object that object allocation sinking had elided:
+ // switch on the (phi-provided) structure, allocate an object of that
+ // structure — with an out-of-line butterfly when needed — and store
+ // each deferred property at its offset per that structure's property
+ // table.
+ ObjectMaterializationData& data = m_node->objectMaterializationData();
+
+ // Lower the values first, to avoid creating values inside a control flow diamond.
+
+ Vector<LValue, 8> values;
+ for (unsigned i = 0; i < data.m_properties.size(); ++i)
+ values.append(lowJSValue(m_graph.varArgChild(m_node, 1 + i)));
+
+ // Every structure the structure-phi can carry, gathered transitively.
+ StructureSet set;
+ m_interpreter.phiChildren()->forAllTransitiveIncomingValues(
+ m_graph.varArgChild(m_node, 0).node(),
+ [&] (Node* incoming) {
+ set.add(incoming->castConstant<Structure*>());
+ });
+
+ Vector<LBasicBlock, 1> blocks(set.size());
+ for (unsigned i = set.size(); i--;)
+ blocks[i] = FTL_NEW_BLOCK(m_out, ("MaterializeNewObject case ", i));
+ LBasicBlock dummyDefault = FTL_NEW_BLOCK(m_out, ("MaterializeNewObject default case"));
+ LBasicBlock outerContinuation = FTL_NEW_BLOCK(m_out, ("MaterializeNewObject continuation"));
+
+ Vector<SwitchCase, 1> cases(set.size());
+ for (unsigned i = set.size(); i--;)
+ cases[i] = SwitchCase(weakStructure(set[i]), blocks[i], Weight(1));
+ m_out.switchInstruction(
+ lowCell(m_graph.varArgChild(m_node, 0)), cases, dummyDefault, Weight(0));
+
+ LBasicBlock outerLastNext = m_out.m_nextBlock;
+
+ Vector<ValueFromBlock, 1> results;
+
+ for (unsigned i = set.size(); i--;) {
+ m_out.appendTo(blocks[i], i + 1 < set.size() ? blocks[i + 1] : dummyDefault);
+
+ Structure* structure = set[i];
+
+ LValue object;
+ LValue butterfly;
+
+ if (structure->outOfLineCapacity()) {
+ // Needs a butterfly: try an inline bump allocation of the
+ // out-of-line storage plus the object; on failure, one runtime
+ // call allocates both.
+ size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
+ MarkedAllocator* allocator = &vm().heap.allocatorForObjectWithoutDestructor(allocationSize);
+
+ LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("MaterializeNewObject complex object allocation slow path"));
+ LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("MaterializeNewObject complex object allocation continuation"));
+
+ LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
+
+ LValue endOfStorage = allocateBasicStorageAndGetEnd(
+ m_out.constIntPtr(structure->outOfLineCapacity() * sizeof(JSValue)),
+ slowPath);
+
+ // The butterfly pointer points just past the indexing header.
+ LValue fastButterflyValue = m_out.add(
+ m_out.constIntPtr(sizeof(IndexingHeader)), endOfStorage);
+
+ LValue fastObjectValue = allocateObject(
+ m_out.constIntPtr(allocator), structure, fastButterflyValue, slowPath);
+
+ ValueFromBlock fastObject = m_out.anchor(fastObjectValue);
+ ValueFromBlock fastButterfly = m_out.anchor(fastButterflyValue);
+ m_out.jump(continuation);
+
+ m_out.appendTo(slowPath, continuation);
+
+ ValueFromBlock slowObject = m_out.anchor(vmCall(
+ m_out.operation(operationNewObjectWithButterfly),
+ m_callFrame, m_out.constIntPtr(structure)));
+ ValueFromBlock slowButterfly = m_out.anchor(
+ m_out.loadPtr(slowObject.value(), m_heaps.JSObject_butterfly));
+
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+
+ object = m_out.phi(m_out.intPtr, fastObject, slowObject);
+ butterfly = m_out.phi(m_out.intPtr, fastButterfly, slowButterfly);
+ } else {
+ // In the easy case where we can do a one-shot allocation, we simply allocate the
+ // object to directly have the desired structure.
+ object = allocateObject(structure);
+ butterfly = nullptr; // Don't have one, don't need one.
+ }
+
+ // Store each materialized property at the offset this structure
+ // assigns to its identifier (inline vs out-of-line chosen per entry).
+ for (PropertyMapEntry entry : structure->getPropertiesConcurrently()) {
+ for (unsigned i = data.m_properties.size(); i--;) {
+ PhantomPropertyValue value = data.m_properties[i];
+ if (m_graph.identifiers()[value.m_identifierNumber] != entry.key)
+ continue;
+
+ LValue base = isInlineOffset(entry.offset) ? object : butterfly;
+ storeProperty(values[i], base, value.m_identifierNumber, entry.offset);
+ break;
+ }
+ }
+
+ results.append(m_out.anchor(object));
+ m_out.jump(outerContinuation);
+ }
+
+ // The structure set covers every incoming value, so the switch default
+ // can never be taken.
+ m_out.appendTo(dummyDefault, outerContinuation);
+ m_out.unreachable();
+
+ m_out.appendTo(outerContinuation, outerLastNext);
+ setJSValue(m_out.phi(m_out.intPtr, results));
+ }
+
+ void compileMaterializeCreateActivation()
+ {
+ // Materializes a sunk activation: fast-path inline allocation of a
+ // JSLexicalEnvironment (falling back to operationCreateActivationDirect),
+ // then stores every deferred variable value into its scope slot.
+ ObjectMaterializationData& data = m_node->objectMaterializationData();
+
+ // Lower values up front to keep them out of the allocation diamond.
+ Vector<LValue, 8> values;
+ for (unsigned i = 0; i < data.m_properties.size(); ++i)
+ values.append(lowJSValue(m_graph.varArgChild(m_node, 1 + i)));
+
+ LValue scope = lowCell(m_graph.varArgChild(m_node, 0));
+ SymbolTable* table = m_node->castOperand<SymbolTable*>();
+ Structure* structure = m_graph.globalObjectFor(m_node->origin.semantic)->activationStructure();
+
+ LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("MaterializeCreateActivation slow path"));
+ LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("MaterializeCreateActivation continuation"));
+
+ LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
+
+ LValue fastObject = allocateObject<JSLexicalEnvironment>(
+ JSLexicalEnvironment::allocationSize(table), structure, m_out.intPtrZero, slowPath);
+
+ m_out.storePtr(scope, fastObject, m_heaps.JSScope_next);
+ m_out.storePtr(weakPointer(table), fastObject, m_heaps.JSSymbolTableObject_symbolTable);
+
+
+ ValueFromBlock fastResult = m_out.anchor(fastObject);
+ m_out.jump(continuation);
+
+ m_out.appendTo(slowPath, continuation);
+ LValue callResult = vmCall(
+ m_out.operation(operationCreateActivationDirect), m_callFrame, weakPointer(structure),
+ scope, weakPointer(table));
+ ValueFromBlock slowResult = m_out.anchor(callResult);
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ LValue activation = m_out.phi(m_out.intPtr, fastResult, slowResult);
+ // Materialization must supply exactly one value per scope slot.
+ RELEASE_ASSERT(data.m_properties.size() == table->scopeSize());
+ for (unsigned i = 0; i < data.m_properties.size(); ++i) {
+ m_out.store64(values[i],
+ activation,
+ m_heaps.JSEnvironmentRecord_variables[data.m_properties[i].m_identifierNumber]);
+ }
+
+ if (validationEnabled()) {
+ // Validate to make sure every slot in the scope has one value.
+ ConcurrentJITLocker locker(table->m_lock);
+ for (auto iter = table->begin(locker), end = table->end(locker); iter != end; ++iter) {
+ bool found = false;
+ for (unsigned i = 0; i < data.m_properties.size(); ++i) {
+ if (iter->value.scopeOffset().offset() == data.m_properties[i].m_identifierNumber) {
+ found = true;
+ break;
+ }
+ }
+ ASSERT_UNUSED(found, found);
+ }
+ }
+
+ setJSValue(activation);
+ }
+
+#if ENABLE(FTL_NATIVE_CALL_INLINING)
+ LValue getFunctionBySymbol(const CString symbol)
+ {
+ // Resolves a native symbol for call inlining: the symbol must be in
+ // the runtime symbol table and its bitcode module must load/link
+ // successfully; otherwise returns null and the caller keeps the
+ // ordinary call.
+ if (!m_ftlState.symbolTable.contains(symbol))
+ return nullptr;
+ if (!getModuleByPathForSymbol(m_ftlState.symbolTable.get(symbol), symbol))
+ return nullptr;
+ return getNamedFunction(m_ftlState.module, symbol.data());
+ }
+
+ // Ensures the runtime bitcode module at |path| is loaded, cleaned up, and
+ // linked into m_ftlState.module so that |symbol| can be inlined. Returns
+ // false (and drops |symbol| from the symbol table where appropriate) when the
+ // module cannot be loaded/parsed/linked or the function is too big to inline.
+ bool getModuleByPathForSymbol(const CString path, const CString symbol)
+ {
+ // Fast path: the library was already linked in; just re-check the size cap.
+ if (m_ftlState.nativeLoadedLibraries.contains(path)) {
+ LValue function = getNamedFunction(m_ftlState.module, symbol.data());
+ if (!isInlinableSize(function)) {
+ // We had no choice but to compile this function, but don't try to inline it ever again.
+ m_ftlState.symbolTable.remove(symbol);
+ return false;
+ }
+ return true;
+ }
+
+ LMemoryBuffer memBuf;
+
+ ASSERT(isX86() || isARM64());
+
+ // Locate the per-platform runtime bitcode file inside the bundle.
+#if PLATFORM(EFL)
+ const CString actualPath = toCString(bundlePath().data(), "/runtime/", path.data());
#else
- speculate(m_node->child1());
+ const CString actualPath = toCString(bundlePath().data(),
+ isX86() ? "/Resources/Runtime/x86_64/" : "/Resources/Runtime/arm64/",
+ path.data());
#endif
+
+ char* outMsg;
+
+ // Read the bitcode file into a memory buffer; on failure, log and bail.
+ if (createMemoryBufferWithContentsOfFile(actualPath.data(), &memBuf, &outMsg)) {
+ if (Options::verboseFTLFailure())
+ dataLog("Failed to load module at ", actualPath, "\n for symbol ", symbol, "\nERROR: ", outMsg, "\n");
+ disposeMessage(outMsg);
+ return false;
+ }
+
+ LModule module;
+
+ if (parseBitcodeInContext(m_ftlState.context, memBuf, &module, &outMsg)) {
+ if (Options::verboseFTLFailure())
+ dataLog("Failed to parse module at ", actualPath, "\n for symbol ", symbol, "\nERROR: ", outMsg, "\n");
+ disposeMemoryBuffer(memBuf);
+ disposeMessage(outMsg);
+ return false;
+ }
+
+ disposeMemoryBuffer(memBuf);
+
+ // If the main module already has a definition for |symbol| that is too big
+ // to inline, give up before linking anything in.
+ if (LValue function = getNamedFunction(m_ftlState.module, symbol.data())) {
+ if (!isInlinableSize(function)) {
+ m_ftlState.symbolTable.remove(symbol);
+ disposeModule(module);
+ return false;
+ }
+ }
+
+ // Record every function name, and clear the tbaa/tbaa.struct metadata kinds
+ // on each instruction so the loaded module's aliasing info does not clash
+ // with the metadata this compiler emits.
+ Vector<CString> namedFunctions;
+ for (LValue function = getFirstFunction(module); function; function = getNextFunction(function)) {
+ CString functionName(getValueName(function));
+ namedFunctions.append(functionName);
+
+ for (LBasicBlock basicBlock = getFirstBasicBlock(function); basicBlock; basicBlock = getNextBasicBlock(basicBlock)) {
+ for (LValue instruction = getFirstInstruction(basicBlock); instruction; instruction = getNextInstruction(instruction)) {
+ setMetadata(instruction, m_tbaaKind, nullptr);
+ setMetadata(instruction, m_tbaaStructKind, nullptr);
+ }
+ }
+ }
+
+ Vector<CString> namedGlobals;
+ for (LValue global = getFirstGlobal(module); global; global = getNextGlobal(global)) {
+ CString globalName(getValueName(global));
+ namedGlobals.append(globalName);
+ }
+
+ // Link the loaded module into the main module; LLVMLinkerDestroySource
+ // consumes |module|, so it must not be disposed after this succeeds.
+ if (linkModules(m_ftlState.module, module, LLVMLinkerDestroySource, &outMsg)) {
+ if (Options::verboseFTLFailure())
+ dataLog("Failed to link module at ", actualPath, "\n for symbol ", symbol, "\nERROR: ", outMsg, "\n");
+ disposeMessage(outMsg);
+ return false;
+ }
+
+ // Internalize the linked-in functions so they can be inlined and then
+ // discarded, and hide any that keep external linkage.
+ for (CString* symbol = namedFunctions.begin(); symbol != namedFunctions.end(); ++symbol) {
+ LValue function = getNamedFunction(m_ftlState.module, symbol->data());
+ LLVMLinkage linkage = getLinkage(function);
+ if (linkage != LLVMInternalLinkage && linkage != LLVMPrivateLinkage)
+ setVisibility(function, LLVMHiddenVisibility);
+ if (!isDeclaration(function)) {
+ setLinkage(function, LLVMPrivateLinkage);
+ setLinkage(function, LLVMAvailableExternallyLinkage);
+ // NOTE(review): the LLVMPrivateLinkage set above is immediately
+ // overwritten by LLVMAvailableExternallyLinkage — confirm whether the
+ // first call is intentional or dead.
+
+ if (ASSERT_DISABLED)
+ removeFunctionAttr(function, LLVMStackProtectAttribute);
+ }
+ }
+
+ for (CString* symbol = namedGlobals.begin(); symbol != namedGlobals.end(); ++symbol) {
+ LValue global = getNamedGlobal(m_ftlState.module, symbol->data());
+ LLVMLinkage linkage = getLinkage(global);
+ if (linkage != LLVMInternalLinkage && linkage != LLVMPrivateLinkage)
+ setVisibility(global, LLVMHiddenVisibility);
+ if (!isDeclaration(global))
+ setLinkage(global, LLVMPrivateLinkage);
+ }
+
+ // Remember that this library has been merged so later lookups take the
+ // fast path at the top of this function.
+ m_ftlState.nativeLoadedLibraries.add(path);
+ return true;
+ }
+#endif
+
+ // Returns true when |function| is small enough to be worth inlining natively:
+ // walks every basic block counting instructions and rejects the function as
+ // soon as the count reaches maximumLLVMInstructionCountForNativeInlining()
+ // (so the walk stops early for large functions).
+ bool isInlinableSize(LValue function)
+ {
+ size_t instructionCount = 0;
+ size_t maxSize = Options::maximumLLVMInstructionCountForNativeInlining();
+ for (LBasicBlock basicBlock = getFirstBasicBlock(function); basicBlock; basicBlock = getNextBasicBlock(basicBlock)) {
+ for (LValue instruction = getFirstInstruction(basicBlock); instruction; instruction = getNextInstruction(instruction)) {
+ if (++instructionCount >= maxSize)
+ return false;
+ }
+ }
+ return true;
}
LValue didOverflowStack()
case PutById:
case Call:
case Construct:
+ case NativeCall:
+ case NativeConstruct:
return m_out.below(
m_callFrame,
m_out.loadPtr(
return m_out.booleanFalse;
}
+ // Result of getArgumentsLength(): |value| always holds the runtime argument
+ // count (excluding `this`); when |isKnown| is true the count is also the
+ // compile-time constant |known|.
+ struct ArgumentsLength {
+ ArgumentsLength()
+ : isKnown(false)
+ , known(UINT_MAX)
+ , value(nullptr)
+ {
+ }
+
+ bool isKnown;
+ unsigned known;
+ LValue value;
+ };
+ // Computes the argument count (excluding `this`) for |inlineCallFrame|.
+ // Non-varargs inline frames have a statically known count; otherwise the
+ // count is loaded from the frame's ArgumentCount slot and decremented by one.
+ ArgumentsLength getArgumentsLength(InlineCallFrame* inlineCallFrame)
+ {
+ ArgumentsLength length;
+
+ if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
+ // arguments.size() includes `this`, hence the - 1.
+ length.known = inlineCallFrame->arguments.size() - 1;
+ length.isKnown = true;
+ length.value = m_out.constInt32(length.known);
+ } else {
+ length.known = UINT_MAX;
+ length.isKnown = false;
+
+ // The machine frame keeps the count at JSStack::ArgumentCount; inline
+ // varargs frames keep it in their own argumentCountRegister.
+ VirtualRegister argumentCountRegister;
+ if (!inlineCallFrame)
+ argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
+ else
+ argumentCountRegister = inlineCallFrame->argumentCountRegister;
+ length.value = m_out.sub(m_out.load32(payloadFor(argumentCountRegister)), m_out.int32One);
+ }
+
+ return length;
+ }
+
+ // Argument count for the current node's semantic-origin call frame.
+ ArgumentsLength getArgumentsLength()
+ {
+ return getArgumentsLength(m_node->origin.semantic.inlineCallFrame);
+ }
+
+ // Callee for the current node's semantic origin: a weak constant for
+ // non-closure inline calls, otherwise loaded from the frame's callee slot.
+ LValue getCurrentCallee()
+ {
+ if (InlineCallFrame* frame = m_node->origin.semantic.inlineCallFrame) {
+ if (frame->isClosureCall)
+ return m_out.loadPtr(addressFor(frame->calleeRecovery.virtualRegister()));
+ return weakPointer(frame->calleeRecovery.constant().asCell());
+ }
+ return m_out.loadPtr(addressFor(JSStack::Callee));
+ }
+
+ // Pointer to the first argument slot (after `this`) of |inlineCallFrame|.
+ LValue getArgumentsStart(InlineCallFrame* inlineCallFrame)
+ {
+ VirtualRegister start = AssemblyHelpers::argumentsStart(inlineCallFrame);
+ return addressFor(start).value();
+ }
+
+ // Arguments start for the current node's semantic-origin frame.
+ LValue getArgumentsStart()
+ {
+ return getArgumentsStart(m_node->origin.semantic.inlineCallFrame);
+ }
+
+ // Speculates that |structureDiscriminant| matches one of the structures in
+ // |set|, OSR-exiting with |exitKind| otherwise. |weakStructureDiscriminant|
+ // maps a Structure* to the comparable LValue form. A single-element set
+ // compiles to one compare-and-speculate; larger sets build a chain of
+ // equality branches with a final speculation on the last element.
+ template<typename Functor>
+ void checkStructure(
+ LValue structureDiscriminant, const FormattedValue& formattedValue, ExitKind exitKind,
+ const StructureSet& set, const Functor& weakStructureDiscriminant)
+ {
+ if (set.size() == 1) {
+ speculate(
+ exitKind, formattedValue, 0,
+ m_out.notEqual(structureDiscriminant, weakStructureDiscriminant(set[0])));
+ return;
+ }
+
+ LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("checkStructure continuation"));
+
+ LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
+ // Each match jumps straight to the continuation; a miss falls through to
+ // test the next structure in the set.
+ for (unsigned i = 0; i < set.size() - 1; ++i) {
+ LBasicBlock nextStructure = FTL_NEW_BLOCK(m_out, ("checkStructure nextStructure"));
+ m_out.branch(
+ m_out.equal(structureDiscriminant, weakStructureDiscriminant(set[i])),
+ unsure(continuation), unsure(nextStructure));
+ m_out.appendTo(nextStructure);
+ }
+
+ // Last element: failing this comparison means no structure matched.
+ speculate(
+ exitKind, formattedValue, 0,
+ m_out.notEqual(structureDiscriminant, weakStructureDiscriminant(set.last())));
+
+ m_out.jump(continuation);
+ m_out.appendTo(continuation, lastNext);
+ }
+
LValue numberOrNotCellToInt32(Edge edge, LValue value)
{
LBasicBlock intCase = FTL_NEW_BLOCK(m_out, ("ValueToInt32 int case"));
m_out.jump(continuation);
} else {
m_out.appendTo(notIntCase, doubleCase);
- m_out.branch(isCellOrMisc(value), unsure(notNumberCase), unsure(doubleCase));
+ m_out.branch(
+ isCellOrMisc(value, provenType(edge)), unsure(notNumberCase), unsure(doubleCase));
m_out.appendTo(doubleCase, notNumberCase);
results.append(m_out.anchor(doubleToInt32(unboxDouble(value))));
}
LValue allocatePropertyStorage(LValue object, Structure* previousStructure)
- {
- if (previousStructure->couldHaveIndexingHeader()) {
- return vmCall(
- m_out.operation(
- operationReallocateButterflyToHavePropertyStorageWithInitialCapacity),
- m_callFrame, object);
- }
-
- LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("allocatePropertyStorage slow path"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("allocatePropertyStorage continuation"));
-
- LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
-
- LValue endOfStorage = allocateBasicStorageAndGetEnd(
- m_out.constIntPtr(initialOutOfLineCapacity * sizeof(JSValue)), slowPath);
-
- ValueFromBlock fastButterfly = m_out.anchor(
- m_out.add(m_out.constIntPtr(sizeof(IndexingHeader)), endOfStorage));
-
- m_out.jump(continuation);
-
- m_out.appendTo(slowPath, continuation);
-
- ValueFromBlock slowButterfly = m_out.anchor(vmCall(
- m_out.operation(operationAllocatePropertyStorageWithInitialCapacity), m_callFrame));
-
- m_out.jump(continuation);
-
- m_out.appendTo(continuation, lastNext);
+ // Allocates the initial out-of-line property storage for |object| and
+ // stores the resulting butterfly pointer into it. Objects that could have
+ // an indexing header must go through the C++ reallocation operation.
+ {
+ if (previousStructure->couldHaveIndexingHeader()) {
+ return vmCall(
+ m_out.operation(
+ operationReallocateButterflyToHavePropertyStorageWithInitialCapacity),
+ m_callFrame, object);
+ }
- LValue result = m_out.phi(m_out.intPtr, fastButterfly, slowButterfly);
+ LValue result = allocatePropertyStorageWithSizeImpl(initialOutOfLineCapacity);
m_out.storePtr(result, object, m_heaps.JSObject_butterfly);
-
return result;
}
LValue reallocatePropertyStorage(
LValue object, LValue oldStorage, Structure* previous, Structure* next)
{
+ // Grows |object|'s out-of-line property storage from |previous|'s capacity
+ // by outOfLineGrowthFactor, copies the existing out-of-line properties into
+ // the new butterfly, and stores the new butterfly pointer. Sizes here are
+ // counted in JSValues, not bytes.
- size_t oldSize = previous->outOfLineCapacity() * sizeof(JSValue);
+ size_t oldSize = previous->outOfLineCapacity();
size_t newSize = oldSize * outOfLineGrowthFactor;
- ASSERT_UNUSED(next, newSize == next->outOfLineCapacity() * sizeof(JSValue));
+ ASSERT_UNUSED(next, newSize == next->outOfLineCapacity());
if (previous->couldHaveIndexingHeader()) {
- LValue newAllocSize = m_out.constInt64(newSize / sizeof(JSValue));
+ LValue newAllocSize = m_out.constIntPtr(newSize);
return vmCall(m_out.operation(operationReallocateButterflyToGrowPropertyStorage), m_callFrame, object, newAllocSize);
}
- LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("reallocatePropertyStorage slow path"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("reallocatePropertyStorage continuation"));
+ LValue result = allocatePropertyStorageWithSizeImpl(newSize);
+
+ // Out-of-line properties live at negative offsets from the butterfly,
+ // below the indexing header and preceding pointer slot; walk backwards
+ // pointer-by-pointer copying the old slots into the new storage.
+ ptrdiff_t headerSize = -sizeof(IndexingHeader) - sizeof(void*);
+ ptrdiff_t endStorage = headerSize - static_cast<ptrdiff_t>(oldSize * sizeof(JSValue));
+
+ for (ptrdiff_t offset = headerSize; offset > endStorage; offset -= sizeof(void*)) {
+ LValue loaded =
+ m_out.loadPtr(m_out.address(m_heaps.properties.atAnyNumber(), oldStorage, offset));
+ m_out.storePtr(loaded, m_out.address(m_heaps.properties.atAnyNumber(), result, offset));
+ }
+
+ m_out.storePtr(result, m_out.address(object, m_heaps.JSObject_butterfly));
+
+ return result;
+ }
+
+ // Shared fast/slow-path butterfly allocation. |sizeInValues| is counted in
+ // JSValues. The fast path bump-allocates raw storage inline and offsets past
+ // the indexing header; the slow path calls the matching VM operation (the
+ // initial-capacity variant when the size allows it). Returns the butterfly
+ // pointer; the caller is responsible for storing it into the object.
+ LValue allocatePropertyStorageWithSizeImpl(size_t sizeInValues)
+ {
+ LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("allocatePropertyStorageWithSizeImpl slow path"));
+ LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("allocatePropertyStorageWithSizeImpl continuation"));
+
LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
- LValue endOfStorage =
- allocateBasicStorageAndGetEnd(m_out.constIntPtr(newSize), slowPath);
+ LValue endOfStorage = allocateBasicStorageAndGetEnd(
+ m_out.constIntPtr(sizeInValues * sizeof(JSValue)), slowPath);
- ValueFromBlock fastButterfly = m_out.anchor(m_out.add(m_out.constIntPtr(sizeof(IndexingHeader)), endOfStorage));
+ // The butterfly pointer points just past the indexing header.
+ ValueFromBlock fastButterfly = m_out.anchor(
+ m_out.add(m_out.constIntPtr(sizeof(IndexingHeader)), endOfStorage));
m_out.jump(continuation);
m_out.appendTo(slowPath, continuation);
- LValue newAllocSize = m_out.constInt64(newSize / sizeof(JSValue));
-
- LValue storageLocation = vmCall(m_out.operation(operationAllocatePropertyStorage), m_callFrame, newAllocSize);
-
- ValueFromBlock slowButterfly = m_out.anchor(storageLocation);
+ LValue slowButterflyValue;
+ if (sizeInValues == initialOutOfLineCapacity) {
+ slowButterflyValue = vmCall(
+ m_out.operation(operationAllocatePropertyStorageWithInitialCapacity),
+ m_callFrame);
+ } else {
+ slowButterflyValue = vmCall(
+ m_out.operation(operationAllocatePropertyStorage),
+ m_callFrame, m_out.constIntPtr(sizeInValues));
+ }
+ ValueFromBlock slowButterfly = m_out.anchor(slowButterflyValue);
m_out.jump(continuation);
m_out.appendTo(continuation, lastNext);
- LValue result = m_out.phi(m_out.intPtr, fastButterfly, slowButterfly);
-
- ptrdiff_t headerSize = -sizeof(JSValue) - sizeof(void *);
- ptrdiff_t endStorage = headerSize - static_cast<ptrdiff_t>(oldSize);
-
- for (ptrdiff_t offset = headerSize; offset > endStorage; offset -= sizeof(void*)) {
- LValue loaded =
- m_out.loadPtr(m_out.address(m_heaps.properties.atAnyNumber(), oldStorage, offset));
- m_out.storePtr(loaded, m_out.address(m_heaps.properties.atAnyNumber(), result, offset));
- }
-
- m_out.storePtr(result, m_out.address(object, m_heaps.JSObject_butterfly));
-
- return result;
+ return m_out.phi(m_out.intPtr, fastButterfly, slowButterfly);
}
LValue getById(LValue base)
{
- StringImpl* uid = m_graph.identifiers()[m_node->identifierNumber()];
+ auto uid = m_graph.identifiers()[m_node->identifierNumber()];
// Arguments: id, bytes, target, numArgs, args...
unsigned stackmapID = m_stackmapIDs++;
return call;
}
- TypedPointer baseIndex(IndexedAbstractHeap& heap, LValue storage, LValue index, Edge edge)
+ // Addresses storage[index] (plus an optional byte |offset|) within |heap|,
+ // zero-extending the 32-bit index to pointer width and passing the edge's
+ // proven constant value so a known index can be folded.
+ TypedPointer baseIndex(IndexedAbstractHeap& heap, LValue storage, LValue index, Edge edge, ptrdiff_t offset = 0)
{
return m_out.baseIndex(
- heap, storage, m_out.zeroExt(index, m_out.intPtr),
- m_state.forNode(edge).m_value);
+ heap, storage, m_out.zeroExtPtr(index), provenValue(edge), offset);
}
void compare(
return;
}
- LOWERING_FAILED(m_node, "Bad use kinds");
+ DFG_CRASH(m_graph, m_node, "Bad use kinds");
}
void compareEqObjectOrOtherToObject(Edge leftChild, Edge rightChild)
LBasicBlock leftNotCellCase = FTL_NEW_BLOCK(m_out, ("CompareEqObjectOrOtherToObject left not cell case"));
LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("CompareEqObjectOrOtherToObject continuation"));
- m_out.branch(isCell(leftValue), unsure(leftCellCase), unsure(leftNotCellCase));
+ m_out.branch(
+ isCell(leftValue, provenType(leftChild)),
+ unsure(leftCellCase), unsure(leftNotCellCase));
LBasicBlock lastNext = m_out.appendTo(leftCellCase, leftNotCellCase);
speculateTruthyObject(leftChild, leftValue, SpecObject | (~SpecCell));
return;
}
- LValue structureID = m_out.load32(cell, m_heaps.JSCell_structureID);
- FTL_TYPE_CHECK(
- jsValueValue(cell), edge, filter,
- m_out.equal(structureID, m_out.constInt32(vm().stringStructure->id())));
+ FTL_TYPE_CHECK(jsValueValue(cell), edge, filter, isNotObject(cell));
speculate(
BadType, jsValueValue(cell), edge.node(),
m_out.testNonZero8(
setBoolean(m_out.phi(m_out.boolean, fastResult, slowResult));
}
- LValue allocateCell(LValue allocator, Structure* structure, LBasicBlock slowPath)
+ LValue allocateCell(LValue allocator, LBasicBlock slowPath)
{
LBasicBlock success = FTL_NEW_BLOCK(m_out, ("object allocation success"));
m_out.loadPtr(result, m_heaps.JSCell_freeListNext),
allocator, m_heaps.MarkedAllocator_freeListHead);
- m_out.store32(m_out.constInt32(structure->id()), result, m_heaps.JSCell_structureID);
- m_out.store8(m_out.constInt8(structure->indexingType()), result, m_heaps.JSCell_indexingType);
- m_out.store8(m_out.constInt8(structure->typeInfo().type()), result, m_heaps.JSCell_typeInfoType);
- m_out.store8(m_out.constInt8(structure->typeInfo().inlineTypeFlags()), result, m_heaps.JSCell_typeInfoFlags);
- m_out.store8(m_out.constInt8(JSCell::NotMarked), result, m_heaps.JSCell_gcData);
-
+ return result;
+ }
+
+ // Writes |structure|'s identity into a freshly allocated cell: the structure
+ // ID, then the 32-bit initialization blob covering the remaining header
+ // bytes (the JSCell_usefulBytes field).
+ void storeStructure(LValue object, Structure* structure)
+ {
+ m_out.store32(m_out.constInt32(structure->id()), object, m_heaps.JSCell_structureID);
+ m_out.store32(
+ m_out.constInt32(structure->objectInitializationBlob()),
+ object, m_heaps.JSCell_usefulBytes);
+ }
+
+ // Allocates a cell from |allocator|'s free list (branching to |slowPath| on
+ // exhaustion) and initializes its header for |structure|.
+ LValue allocateCell(LValue allocator, Structure* structure, LBasicBlock slowPath)
+ {
+ LValue result = allocateCell(allocator, slowPath);
+ storeStructure(result, structure);
+ return result;
+ }
}
template<typename ClassType>
- LValue allocateObject(Structure* structure, LValue butterfly, LBasicBlock slowPath)
+ // Allocates an object of |size| bytes from the MarkedAllocator appropriate
+ // for ClassType (destructor/immortal-structure needs are resolved by
+ // allocatorForObjectOfType), initializing it with |structure| and |butterfly|.
+ LValue allocateObject(
+ size_t size, Structure* structure, LValue butterfly, LBasicBlock slowPath)
{
- MarkedAllocator* allocator;
- size_t size = ClassType::allocationSize(0);
- if (ClassType::needsDestruction && ClassType::hasImmortalStructure)
- allocator = &vm().heap.allocatorForObjectWithImmortalStructureDestructor(size);
- else if (ClassType::needsDestruction)
- allocator = &vm().heap.allocatorForObjectWithNormalDestructor(size);
- else
- allocator = &vm().heap.allocatorForObjectWithoutDestructor(size);
+ MarkedAllocator* allocator = &vm().heap.allocatorForObjectOfType<ClassType>(size);
return allocateObject(m_out.constIntPtr(allocator), structure, butterfly, slowPath);
}
+ // Convenience overload: allocate ClassType at its zero-inline-capacity size.
+ template<typename ClassType>
+ LValue allocateObject(Structure* structure, LValue butterfly, LBasicBlock slowPath)
+ {
+ return allocateObject<ClassType>(
+ ClassType::allocationSize(0), structure, butterfly, slowPath);
+ }
+
+ // Allocates an object whose size is only known at runtime. Rounds |size| up
+ // to the precise-allocator step, then selects the matching allocator from
+ // ClassType's subspace: precise allocators below preciseCutoff, imprecise
+ // allocators below impreciseCutoff, and the |slowPath| for oversize requests.
+ template<typename ClassType>
+ LValue allocateVariableSizedObject(
+ LValue size, Structure* structure, LValue butterfly, LBasicBlock slowPath)
+ {
+ // The shift-based allocator indexing below requires power-of-two steps.
+ static_assert(!(MarkedSpace::preciseStep & (MarkedSpace::preciseStep - 1)), "MarkedSpace::preciseStep must be a power of two.");
+ static_assert(!(MarkedSpace::impreciseStep & (MarkedSpace::impreciseStep - 1)), "MarkedSpace::impreciseStep must be a power of two.");
+
+ LValue subspace = m_out.constIntPtr(&vm().heap.subspaceForObjectOfType<ClassType>());
+
+ LBasicBlock smallCaseBlock = FTL_NEW_BLOCK(m_out, ("allocateVariableSizedObject small case"));
+ LBasicBlock largeOrOversizeCaseBlock = FTL_NEW_BLOCK(m_out, ("allocateVariableSizedObject large or oversize case"));
+ LBasicBlock largeCaseBlock = FTL_NEW_BLOCK(m_out, ("allocateVariableSizedObject large case"));
+ LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("allocateVariableSizedObject continuation"));
+
+ // Round up so the subsequent shift indexes the right size class.
+ LValue uproundedSize = m_out.add(size, m_out.constInt32(MarkedSpace::preciseStep - 1));
+ LValue isSmall = m_out.below(uproundedSize, m_out.constInt32(MarkedSpace::preciseCutoff));
+ m_out.branch(isSmall, unsure(smallCaseBlock), unsure(largeOrOversizeCaseBlock));
+
+ LBasicBlock lastNext = m_out.appendTo(smallCaseBlock, largeOrOversizeCaseBlock);
+ TypedPointer address = m_out.baseIndex(
+ m_heaps.MarkedSpace_Subspace_preciseAllocators, subspace,
+ m_out.zeroExtPtr(m_out.lShr(uproundedSize, m_out.constInt32(getLSBSet(MarkedSpace::preciseStep)))));
+ ValueFromBlock smallAllocator = m_out.anchor(address.value());
+ m_out.jump(continuation);
+
+ m_out.appendTo(largeOrOversizeCaseBlock, largeCaseBlock);
+ m_out.branch(
+ m_out.below(uproundedSize, m_out.constInt32(MarkedSpace::impreciseCutoff)),
+ usually(largeCaseBlock), rarely(slowPath));
+
+ m_out.appendTo(largeCaseBlock, continuation);
+ address = m_out.baseIndex(
+ m_heaps.MarkedSpace_Subspace_impreciseAllocators, subspace,
+ m_out.zeroExtPtr(m_out.lShr(uproundedSize, m_out.constInt32(getLSBSet(MarkedSpace::impreciseStep)))));
+ ValueFromBlock largeAllocator = m_out.anchor(address.value());
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ LValue allocator = m_out.phi(m_out.intPtr, smallAllocator, largeAllocator);
+
+ return allocateObject(allocator, structure, butterfly, slowPath);
+ }
+
// Returns a pointer to the end of the allocation.
LValue allocateBasicStorageAndGetEnd(LValue size, LBasicBlock slowPath)
{
m_out.loadPtr(m_out.absolute(&allocator.m_currentPayloadEnd)), newRemaining);
}
+ // Allocates a JSFinalObject for |structure| with a null butterfly: inline
+ // free-list allocation on the fast path, operationNewObject on the slow path.
+ LValue allocateObject(Structure* structure)
+ {
+ size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
+ MarkedAllocator* allocator = &vm().heap.allocatorForObjectWithoutDestructor(allocationSize);
+
+ LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("allocateObject slow path"));
+ LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("allocateObject continuation"));
+
+ LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
+
+ ValueFromBlock fastResult = m_out.anchor(allocateObject(
+ m_out.constIntPtr(allocator), structure, m_out.intPtrZero, slowPath));
+
+ m_out.jump(continuation);
+
+ m_out.appendTo(slowPath, continuation);
+
+ ValueFromBlock slowResult = m_out.anchor(vmCall(
+ m_out.operation(operationNewObject), m_callFrame, m_out.constIntPtr(structure)));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ return m_out.phi(m_out.intPtr, fastResult, slowResult);
+ }
+
struct ArrayValues {
ArrayValues()
: array(0)
LValue typedArrayLength(Edge baseEdge, ArrayMode arrayMode, LValue base)
{
+ // Folds the length to a constant when the view is provably constant-
+ // foldable; otherwise loads the (non-negative) length field at runtime.
- if (JSArrayBufferView* view = m_graph.tryGetFoldableView(baseEdge.node(), arrayMode))
+ JSArrayBufferView* view = m_graph.tryGetFoldableView(provenValue(baseEdge), arrayMode);
+ if (view)
return m_out.constInt32(view->length());
return m_out.load32NonNegative(base, m_heaps.JSArrayBufferView_length);
}
{
switch (edge.useKind()) {
case BooleanUse:
- return lowBoolean(m_node->child1());
+ return lowBoolean(edge);
case Int32Use:
- return m_out.notZero32(lowInt32(m_node->child1()));
+ return m_out.notZero32(lowInt32(edge));
case DoubleRepUse:
return m_out.doubleNotEqual(lowDouble(edge), m_out.doubleZero);
case ObjectOrOtherUse:
edge, CellCaseSpeculatesObject, SpeculateNullOrUndefined,
ManualOperandSpeculation));
case StringUse: {
- LValue stringValue = lowString(m_node->child1());
+ LValue stringValue = lowString(edge);
LValue length = m_out.load32NonNegative(stringValue, m_heaps.JSString_length);
return m_out.notEqual(length, m_out.int32Zero);
}
case UntypedUse: {
- LValue value = lowJSValue(m_node->child1());
-
- LBasicBlock slowCase = FTL_NEW_BLOCK(m_out, ("Boolify untyped slow case"));
- LBasicBlock fastCase = FTL_NEW_BLOCK(m_out, ("Boolify untyped fast case"));
+ LValue value = lowJSValue(edge);
+
+ // Implements the following control flow structure:
+ // if (value is cell) {
+ // if (value is string)
+ // result = !!value->length
+ // else {
+ // do evil things for masquerades-as-undefined
+ // result = true
+ // }
+ // } else if (value is int32) {
+ // result = !!unboxInt32(value)
+ // } else if (value is number) {
+ // result = !!unboxDouble(value)
+ // } else {
+ // result = value == jsTrue
+ // }
+
+ LBasicBlock cellCase = FTL_NEW_BLOCK(m_out, ("Boolify untyped cell case"));
+ LBasicBlock stringCase = FTL_NEW_BLOCK(m_out, ("Boolify untyped string case"));
+ LBasicBlock notStringCase = FTL_NEW_BLOCK(m_out, ("Boolify untyped not string case"));
+ LBasicBlock notCellCase = FTL_NEW_BLOCK(m_out, ("Boolify untyped not cell case"));
+ LBasicBlock int32Case = FTL_NEW_BLOCK(m_out, ("Boolify untyped int32 case"));
+ LBasicBlock notInt32Case = FTL_NEW_BLOCK(m_out, ("Boolify untyped not int32 case"));
+ LBasicBlock doubleCase = FTL_NEW_BLOCK(m_out, ("Boolify untyped double case"));
+ LBasicBlock notDoubleCase = FTL_NEW_BLOCK(m_out, ("Boolify untyped not double case"));
LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("Boolify untyped continuation"));
- m_out.branch(isNotBoolean(value), rarely(slowCase), usually(fastCase));
+ Vector<ValueFromBlock> results;
- LBasicBlock lastNext = m_out.appendTo(fastCase, slowCase);
- ValueFromBlock fastResult = m_out.anchor(unboxBoolean(value));
+ m_out.branch(isCell(value, provenType(edge)), unsure(cellCase), unsure(notCellCase));
+
+ LBasicBlock lastNext = m_out.appendTo(cellCase, stringCase);
+ m_out.branch(
+ isString(value, provenType(edge) & SpecCell),
+ unsure(stringCase), unsure(notStringCase));
+
+ m_out.appendTo(stringCase, notStringCase);
+ LValue nonEmptyString = m_out.notZero32(
+ m_out.load32NonNegative(value, m_heaps.JSString_length));
+ results.append(m_out.anchor(nonEmptyString));
m_out.jump(continuation);
- m_out.appendTo(slowCase, continuation);
- ValueFromBlock slowResult = m_out.anchor(m_out.notNull(vmCall(
- m_out.operation(operationConvertJSValueToBoolean), m_callFrame, value)));
+ m_out.appendTo(notStringCase, notCellCase);
+ LValue isTruthyObject;
+ if (masqueradesAsUndefinedWatchpointIsStillValid())
+ isTruthyObject = m_out.booleanTrue;
+ else {
+ LBasicBlock masqueradesCase = FTL_NEW_BLOCK(m_out, ("Boolify untyped masquerades case"));
+
+ results.append(m_out.anchor(m_out.booleanTrue));
+
+ m_out.branch(
+ m_out.testIsZero8(
+ m_out.load8(value, m_heaps.JSCell_typeInfoFlags),
+ m_out.constInt8(MasqueradesAsUndefined)),
+ usually(continuation), rarely(masqueradesCase));
+
+ m_out.appendTo(masqueradesCase);
+
+ isTruthyObject = m_out.notEqual(
+ m_out.constIntPtr(m_graph.globalObjectFor(m_node->origin.semantic)),
+ m_out.loadPtr(loadStructure(value), m_heaps.Structure_globalObject));
+ }
+ results.append(m_out.anchor(isTruthyObject));
+ m_out.jump(continuation);
+
+ m_out.appendTo(notCellCase, int32Case);
+ m_out.branch(
+ isInt32(value, provenType(edge) & ~SpecCell),
+ unsure(int32Case), unsure(notInt32Case));
+
+ m_out.appendTo(int32Case, notInt32Case);
+ results.append(m_out.anchor(m_out.notZero32(unboxInt32(value))));
+ m_out.jump(continuation);
+
+ m_out.appendTo(notInt32Case, doubleCase);
+ m_out.branch(
+ isNumber(value, provenType(edge) & ~SpecCell),
+ unsure(doubleCase), unsure(notDoubleCase));
+
+ m_out.appendTo(doubleCase, notDoubleCase);
+ // Note that doubleNotEqual() really means not-equal-and-ordered. It will return false
+ // if value is NaN.
+ LValue doubleIsTruthy = m_out.doubleNotEqual(
+ unboxDouble(value), m_out.constDouble(0));
+ results.append(m_out.anchor(doubleIsTruthy));
+ m_out.jump(continuation);
+
+ m_out.appendTo(notDoubleCase, continuation);
+ LValue miscIsTruthy = m_out.equal(
+ value, m_out.constInt64(JSValue::encode(jsBoolean(true))));
+ results.append(m_out.anchor(miscIsTruthy));
m_out.jump(continuation);
m_out.appendTo(continuation, lastNext);
- return m_out.phi(m_out.boolean, fastResult, slowResult);
+ return m_out.phi(m_out.boolean, results);
}
default:
- LOWERING_FAILED(m_node, "Bad use kind");
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
return 0;
}
}
LBasicBlock primitiveCase = FTL_NEW_BLOCK(m_out, ("EqualNullOrUndefined primitive case"));
LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("EqualNullOrUndefined continuation"));
- m_out.branch(isNotCell(value), unsure(primitiveCase), unsure(cellCase));
+ m_out.branch(isNotCell(value, provenType(edge)), unsure(primitiveCase), unsure(cellCase));
LBasicBlock lastNext = m_out.appendTo(cellCase, primitiveCase);
break;
case CellCaseSpeculatesObject:
FTL_TYPE_CHECK(
- jsValueValue(value), edge, (~SpecCell) | SpecObject,
- m_out.equal(
- m_out.load32(value, m_heaps.JSCell_structureID),
- m_out.constInt32(vm().stringStructure->id())));
+ jsValueValue(value), edge, (~SpecCell) | SpecObject, isNotObject(value));
break;
}
primitiveResult = m_out.equal(value, m_out.constInt64(ValueUndefined));
break;
case EqualNullOrUndefined:
- primitiveResult = isOther(value);
+ primitiveResult = isOther(value, provenType(edge));
break;
case SpeculateNullOrUndefined:
FTL_TYPE_CHECK(
Vector<SwitchCase> cases;
for (unsigned i = 0; i < data->cases.size(); ++i) {
cases.append(SwitchCase(
- constInt(type, data->cases[i].value.switchLookupValue()),
+ constInt(type, data->cases[i].value.switchLookupValue(data->kind)),
lowBlock(data->cases[i].target.block), Weight(data->cases[i].target.count)));
}
lowBlock(data->fallThrough.block), Weight(data->fallThrough.count));
}
+ // Compiles a string switch over |string|. When every case label is an
+ // 8-bit string within the per-case and total length caps, emits an inline
+ // binary character switch (switchStringRecurse); otherwise, or when the
+ // input is a rope (null impl) or a 16-bit string, falls back to
+ // switchStringSlow.
+ void switchString(SwitchData* data, LValue string)
+ {
+ bool canDoBinarySwitch = true;
+ unsigned totalLength = 0;
+
+ for (DFG::SwitchCase myCase : data->cases) {
+ StringImpl* string = myCase.value.stringImpl();
+ if (!string->is8Bit()) {
+ canDoBinarySwitch = false;
+ break;
+ }
+ if (string->length() > Options::maximumBinaryStringSwitchCaseLength()) {
+ canDoBinarySwitch = false;
+ break;
+ }
+ totalLength += string->length();
+ }
+
+ if (!canDoBinarySwitch || totalLength > Options::maximumBinaryStringSwitchTotalLength()) {
+ switchStringSlow(data, string);
+ return;
+ }
+
+ LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value);
+ LValue length = m_out.load32(string, m_heaps.JSString_length);
+
+ LBasicBlock hasImplBlock = FTL_NEW_BLOCK(m_out, ("Switch/SwitchString has impl case"));
+ LBasicBlock is8BitBlock = FTL_NEW_BLOCK(m_out, ("Switch/SwitchString is 8 bit case"));
+ LBasicBlock slowBlock = FTL_NEW_BLOCK(m_out, ("Switch/SwitchString slow case"));
+
+ // A null impl means the string is a rope; resolve it on the slow path.
+ m_out.branch(m_out.isNull(stringImpl), unsure(slowBlock), unsure(hasImplBlock));
+
+ LBasicBlock lastNext = m_out.appendTo(hasImplBlock, is8BitBlock);
+
+ m_out.branch(
+ m_out.testIsZero32(
+ m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
+ m_out.constInt32(StringImpl::flagIs8Bit())),
+ unsure(slowBlock), unsure(is8BitBlock));
+
+ m_out.appendTo(is8BitBlock, slowBlock);
+
+ LValue buffer = m_out.loadPtr(stringImpl, m_heaps.StringImpl_data);
+
+ // FIXME: We should propagate branch weight data to the cases of this switch.
+ // https://bugs.webkit.org/show_bug.cgi?id=144368
+
+ // Sort the cases so the recursive emitter can binary-partition them.
+ Vector<StringSwitchCase> cases;
+ for (DFG::SwitchCase myCase : data->cases)
+ cases.append(StringSwitchCase(myCase.value.stringImpl(), lowBlock(myCase.target.block)));
+ std::sort(cases.begin(), cases.end());
+ switchStringRecurse(data, buffer, length, cases, 0, 0, cases.size(), 0, false);
+
+ m_out.appendTo(slowBlock, lastNext);
+ switchStringSlow(data, string);
+ }
+
+ // The code for string switching is based closely on the same code in the DFG backend. While it
+ // would be nice to reduce the amount of similar-looking code, it seems like this is one of
+ // those algorithms where factoring out the common bits would result in more code than just
+ // duplicating.
+
+ // One case of an inline string switch: the label string and its target
+ // block. Ordered lexicographically so cases can be sorted for partitioning.
+ struct StringSwitchCase {
+ StringSwitchCase() { }
+
+ StringSwitchCase(StringImpl* string, LBasicBlock target)
+ : string(string)
+ , target(target)
+ {
+ }
+
+ bool operator<(const StringSwitchCase& other) const
+ {
+ return stringLessThan(*string, *other.string);
+ }
+
+ StringImpl* string;
+ LBasicBlock target;
+ };
+
+ // A run of string-switch cases [begin, end) that share the same character
+ // at the position currently being switched on; ordered by that character.
+ struct CharacterCase {
+ CharacterCase()
+ : character(0)
+ , begin(0)
+ , end(0)
+ {
+ }
+
+ CharacterCase(LChar character, unsigned begin, unsigned end)
+ : character(character)
+ , begin(begin)
+ , end(end)
+ {
+ }
+
+ bool operator<(const CharacterCase& other) const
+ {
+ return character < other.character;
+ }
+
+ LChar character;
+ unsigned begin;
+ unsigned end;
+ };
+
+ // Recursively emits the inline binary string switch for the sorted cases in
+ // [begin, end). |numChecked| characters are already known to match,
+ // |alreadyCheckedLength| is the length lower bound already established, and
+ // |checkedExactLength| says whether the exact length has been verified.
+ // Checks the shared prefix character-by-character, dispatches on length when
+ // one case is a prefix of the others, and otherwise emits a machine switch
+ // on the first unchecked character, recursing into each partition.
+ void switchStringRecurse(
+ SwitchData* data, LValue buffer, LValue length, const Vector<StringSwitchCase>& cases,
+ unsigned numChecked, unsigned begin, unsigned end, unsigned alreadyCheckedLength,
+ unsigned checkedExactLength)
+ {
+ LBasicBlock fallThrough = lowBlock(data->fallThrough.block);
+
+ if (begin == end) {
+ m_out.jump(fallThrough);
+ return;
+ }
+
+ // Compute the longest common prefix (beyond numChecked) and the minimum
+ // length over the partition, and whether all case lengths are equal.
+ unsigned minLength = cases[begin].string->length();
+ unsigned commonChars = minLength;
+ bool allLengthsEqual = true;
+ for (unsigned i = begin + 1; i < end; ++i) {
+ unsigned myCommonChars = numChecked;
+ unsigned limit = std::min(cases[begin].string->length(), cases[i].string->length());
+ for (unsigned j = numChecked; j < limit; ++j) {
+ if (cases[begin].string->at(j) != cases[i].string->at(j))
+ break;
+ myCommonChars++;
+ }
+ commonChars = std::min(commonChars, myCommonChars);
+ if (minLength != cases[i].string->length())
+ allLengthsEqual = false;
+ minLength = std::min(minLength, cases[i].string->length());
+ }
+
+ if (checkedExactLength) {
+ DFG_ASSERT(m_graph, m_node, alreadyCheckedLength == minLength);
+ DFG_ASSERT(m_graph, m_node, allLengthsEqual);
+ }
+
+ DFG_ASSERT(m_graph, m_node, minLength >= commonChars);
+
+ // Establish the length precondition for reading up to minLength chars.
+ if (!allLengthsEqual && alreadyCheckedLength < minLength)
+ m_out.check(m_out.below(length, m_out.constInt32(minLength)), unsure(fallThrough));
+ if (allLengthsEqual && (alreadyCheckedLength < minLength || !checkedExactLength))
+ m_out.check(m_out.notEqual(length, m_out.constInt32(minLength)), unsure(fallThrough));
+
+ // Verify the common prefix one character at a time.
+ for (unsigned i = numChecked; i < commonChars; ++i) {
+ m_out.check(
+ m_out.notEqual(
+ m_out.load8(buffer, m_heaps.characters8[i]),
+ m_out.constInt8(cases[begin].string->at(i))),
+ unsure(fallThrough));
+ }
+
+ if (minLength == commonChars) {
+ // This is the case where one of the cases is a prefix of all of the other cases.
+ // We've already checked that the input string is a prefix of all of the cases,
+ // so we just check length to jump to that case.
+
+ DFG_ASSERT(m_graph, m_node, cases[begin].string->length() == commonChars);
+ for (unsigned i = begin + 1; i < end; ++i)
+ DFG_ASSERT(m_graph, m_node, cases[i].string->length() > commonChars);
+
+ if (allLengthsEqual) {
+ DFG_ASSERT(m_graph, m_node, end == begin + 1);
+ m_out.jump(cases[begin].target);
+ return;
+ }
+
+ m_out.check(
+ m_out.equal(length, m_out.constInt32(commonChars)),
+ unsure(cases[begin].target));
+
+ // We've checked if the length is >= minLength, and then we checked if the length is
+ // == commonChars. We get to this point if it is >= minLength but not == commonChars.
+ // Hence we know that it now must be > minLength, i.e. that it's >= minLength + 1.
+ switchStringRecurse(
+ data, buffer, length, cases, commonChars, begin + 1, end, minLength + 1, false);
+ return;
+ }
+
+ // At this point we know that the string is longer than commonChars, and we've only verified
+ // commonChars. Use a binary switch on the next unchecked character, i.e.
+ // string[commonChars].
+
+ DFG_ASSERT(m_graph, m_node, end >= begin + 2);
+
+ LValue uncheckedChar = m_out.load8(buffer, m_heaps.characters8[commonChars]);
+
+ // Group contiguous cases by their character at position commonChars.
+ Vector<CharacterCase> characterCases;
+ CharacterCase currentCase(cases[begin].string->at(commonChars), begin, begin + 1);
+ for (unsigned i = begin + 1; i < end; ++i) {
+ LChar currentChar = cases[i].string->at(commonChars);
+ if (currentChar != currentCase.character) {
+ currentCase.end = i;
+ characterCases.append(currentCase);
+ currentCase = CharacterCase(currentChar, i, i + 1);
+ } else
+ currentCase.end = i + 1;
+ }
+ characterCases.append(currentCase);
+
+ Vector<LBasicBlock> characterBlocks;
+ for (CharacterCase& myCase : characterCases)
+ characterBlocks.append(FTL_NEW_BLOCK(m_out, ("Switch/SwitchString case for ", myCase.character, " at index ", commonChars)));
+
+ Vector<SwitchCase> switchCases;
+ for (unsigned i = 0; i < characterCases.size(); ++i) {
+ if (i)
+ DFG_ASSERT(m_graph, m_node, characterCases[i - 1].character < characterCases[i].character);
+ switchCases.append(SwitchCase(
+ m_out.constInt8(characterCases[i].character), characterBlocks[i], Weight()));
+ }
+ m_out.switchInstruction(uncheckedChar, switchCases, fallThrough, Weight());
+
+ LBasicBlock lastNext = m_out.m_nextBlock;
+ characterBlocks.append(lastNext); // Makes it convenient to set nextBlock.
+ // Recurse into each character partition with one more char verified.
+ for (unsigned i = 0; i < characterCases.size(); ++i) {
+ m_out.appendTo(characterBlocks[i], characterBlocks[i + 1]);
+ switchStringRecurse(
+ data, buffer, length, cases, commonChars + 1,
+ characterCases[i].begin, characterCases[i].end, minLength, allLengthsEqual);
+ }
+
+ DFG_ASSERT(m_graph, m_node, m_out.m_nextBlock == lastNext);
+ }
+
+ void switchStringSlow(SwitchData* data, LValue string)
+ {
+ // FIXME: We ought to be able to use computed gotos here. We would save the labels of the
+ // blocks we want to jump to, and then request their addresses after compilation completes.
+ // https://bugs.webkit.org/show_bug.cgi?id=144369
+
+ LValue branchOffset = vmCall(
+ m_out.operation(operationSwitchStringAndGetBranchOffset),
+ m_callFrame, m_out.constIntPtr(data->switchTableIndex), string);
+
+ StringJumpTable& table = codeBlock()->stringSwitchJumpTable(data->switchTableIndex);
+
+ Vector<SwitchCase> cases;
+ std::unordered_set<int32_t> alreadyHandled; // These may be negative, or zero, or probably other stuff, too. We don't want to mess with HashSet's corner cases and we don't really care about throughput here.
+ for (unsigned i = 0; i < data->cases.size(); ++i) {
+ // FIXME: The fact that we're using the bytecode's switch table means that the
+ // following DFG IR transformation would be invalid.
+ //
+ // Original code:
+ // switch (v) {
+ // case "foo":
+ // case "bar":
+ // things();
+ // break;
+ // default:
+ // break;
+ // }
+ //
+ // New code:
+ // switch (v) {
+ // case "foo":
+ // instrumentFoo();
+ // goto _things;
+ // case "bar":
+ // instrumentBar();
+ // _things:
+ // things();
+ // break;
+ // default:
+ // break;
+ // }
+ //
+ // Luckily, we don't currently do any such transformation. But it's kind of silly that
+ // this is an issue.
+ // https://bugs.webkit.org/show_bug.cgi?id=144635
+
+ DFG::SwitchCase myCase = data->cases[i];
+ StringJumpTable::StringOffsetTable::iterator iter =
+ table.offsetTable.find(myCase.value.stringImpl());
+ DFG_ASSERT(m_graph, m_node, iter != table.offsetTable.end());
+
+ if (!alreadyHandled.insert(iter->value.branchOffset).second)
+ continue;
+
+ cases.append(SwitchCase(
+ m_out.constInt32(iter->value.branchOffset),
+ lowBlock(myCase.target.block), Weight(myCase.target.count)));
+ }
+
+ m_out.switchInstruction(
+ branchOffset, cases, lowBlock(data->fallThrough.block),
+ Weight(data->fallThrough.count));
+ }
+
+ // Calls the functor at the point of code generation where we know what the result type is.
+ // You can emit whatever code you like at that point. Expects you to terminate the basic block.
+ // When buildTypeOf() returns, it will have terminated all basic blocks that it created. So, if
+ // you aren't using this as the terminator of a high-level block, you should create your own
+ // continuation and set it as the nextBlock (m_out.insertNewBlocksBefore(continuation)) before
+ // calling this. For example:
+ //
+ // LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("My continuation"));
+ // LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
+ // buildTypeOf(
+ // child, value,
+ // [&] (TypeofType type) {
+ // do things;
+ // m_out.jump(continuation);
+ // });
+ // m_out.appendTo(continuation, lastNext);
+ template<typename Functor>
+ void buildTypeOf(Edge child, LValue value, const Functor& functor)
+ {
+ JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
+
+ // Implements the following branching structure:
+ //
+ // if (is cell) {
+ // if (is object) {
+ // if (is function) {
+ // return function;
+ // } else if (doesn't have call trap and doesn't masquerade as undefined) {
+ // return object
+ // } else {
+ // return slowPath();
+ // }
+ // } else if (is string) {
+ // return string
+ // } else {
+ // return symbol
+ // }
+ // } else if (is number) {
+ // return number
+ // } else if (is null) {
+ // return object
+ // } else if (is boolean) {
+ // return boolean
+ // } else {
+ // return undefined
+ // }
+
+ LBasicBlock cellCase = FTL_NEW_BLOCK(m_out, ("buildTypeOf cell case"));
+ LBasicBlock objectCase = FTL_NEW_BLOCK(m_out, ("buildTypeOf object case"));
+ LBasicBlock functionCase = FTL_NEW_BLOCK(m_out, ("buildTypeOf function case"));
+ LBasicBlock notFunctionCase = FTL_NEW_BLOCK(m_out, ("buildTypeOf not function case"));
+ LBasicBlock reallyObjectCase = FTL_NEW_BLOCK(m_out, ("buildTypeOf really object case"));
+ LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("buildTypeOf slow path"));
+ LBasicBlock unreachable = FTL_NEW_BLOCK(m_out, ("buildTypeOf unreachable"));
+ LBasicBlock notObjectCase = FTL_NEW_BLOCK(m_out, ("buildTypeOf not object case"));
+ LBasicBlock stringCase = FTL_NEW_BLOCK(m_out, ("buildTypeOf string case"));
+ LBasicBlock symbolCase = FTL_NEW_BLOCK(m_out, ("buildTypeOf symbol case"));
+ LBasicBlock notCellCase = FTL_NEW_BLOCK(m_out, ("buildTypeOf not cell case"));
+ LBasicBlock numberCase = FTL_NEW_BLOCK(m_out, ("buildTypeOf number case"));
+ LBasicBlock notNumberCase = FTL_NEW_BLOCK(m_out, ("buildTypeOf not number case"));
+ LBasicBlock notNullCase = FTL_NEW_BLOCK(m_out, ("buildTypeOf not null case"));
+ LBasicBlock booleanCase = FTL_NEW_BLOCK(m_out, ("buildTypeOf boolean case"));
+ LBasicBlock undefinedCase = FTL_NEW_BLOCK(m_out, ("buildTypeOf undefined case"));
+
+ m_out.branch(isCell(value, provenType(child)), unsure(cellCase), unsure(notCellCase));
+
+ LBasicBlock lastNext = m_out.appendTo(cellCase, objectCase);
+ m_out.branch(isObject(value, provenType(child)), unsure(objectCase), unsure(notObjectCase));
+
+ m_out.appendTo(objectCase, functionCase);
+ m_out.branch(
+ isFunction(value, provenType(child) & SpecObject),
+ unsure(functionCase), unsure(notFunctionCase));
+
+ m_out.appendTo(functionCase, notFunctionCase);
+ functor(TypeofType::Function);
+
+ m_out.appendTo(notFunctionCase, reallyObjectCase);
+ m_out.branch(
+ isExoticForTypeof(value, provenType(child) & (SpecObject - SpecFunction)),
+ rarely(slowPath), usually(reallyObjectCase));
+
+ m_out.appendTo(reallyObjectCase, slowPath);
+ functor(TypeofType::Object);
+
+ m_out.appendTo(slowPath, unreachable);
+ LValue result = vmCall(
+ m_out.operation(operationTypeOfObjectAsTypeofType), m_callFrame,
+ weakPointer(globalObject), value);
+ Vector<SwitchCase, 3> cases;
+ cases.append(SwitchCase(m_out.constInt32(static_cast<int32_t>(TypeofType::Undefined)), undefinedCase));
+ cases.append(SwitchCase(m_out.constInt32(static_cast<int32_t>(TypeofType::Object)), reallyObjectCase));
+ cases.append(SwitchCase(m_out.constInt32(static_cast<int32_t>(TypeofType::Function)), functionCase));
+ m_out.switchInstruction(result, cases, unreachable, Weight());
+
+ m_out.appendTo(unreachable, notObjectCase);
+ m_out.unreachable();
+
+ m_out.appendTo(notObjectCase, stringCase);
+ m_out.branch(
+ isString(value, provenType(child) & (SpecCell - SpecObject)),
+ unsure(stringCase), unsure(symbolCase));
+
+ m_out.appendTo(stringCase, symbolCase);
+ functor(TypeofType::String);
+
+ m_out.appendTo(symbolCase, notCellCase);
+ functor(TypeofType::Symbol);
+
+ m_out.appendTo(notCellCase, numberCase);
+ m_out.branch(
+ isNumber(value, provenType(child) & ~SpecCell),
+ unsure(numberCase), unsure(notNumberCase));
+
+ m_out.appendTo(numberCase, notNumberCase);
+ functor(TypeofType::Number);
+
+ m_out.appendTo(notNumberCase, notNullCase);
+ LValue isNull;
+ if (provenType(child) & SpecOther)
+ isNull = m_out.equal(value, m_out.constInt64(ValueNull));
+ else
+ isNull = m_out.booleanFalse;
+ m_out.branch(isNull, unsure(reallyObjectCase), unsure(notNullCase));
+
+ m_out.appendTo(notNullCase, booleanCase);
+ m_out.branch(
+ isBoolean(value, provenType(child) & ~(SpecCell | SpecFullNumber)),
+ unsure(booleanCase), unsure(undefinedCase));
+
+ m_out.appendTo(booleanCase, undefinedCase);
+ functor(TypeofType::Boolean);
+
+ m_out.appendTo(undefinedCase, lastNext);
+ functor(TypeofType::Undefined);
+ }
+
LValue doubleToInt32(LValue doubleValue, double low, double high, bool isSigned = true)
{
LBasicBlock greatEnough = FTL_NEW_BLOCK(m_out, ("doubleToInt32 greatEnough"));
return m_out.phi(m_out.int32, fastResult, slowResult);
}
- void checkArgumentsNotCreated()
- {
- CodeOrigin codeOrigin = m_node->origin.semantic;
- VirtualRegister argumentsRegister = m_graph.argumentsRegisterFor(codeOrigin);
- if (isEmptySpeculation(m_state.variables().operand(argumentsRegister).m_type))
- return;
-
- VirtualRegister argsReg = m_graph.machineArgumentsRegisterFor(codeOrigin);
- speculate(
- ArgumentsEscaped, noValue(), 0,
- m_out.notZero64(m_out.load64(addressFor(argsReg))));
- }
-
void speculate(
ExitKind kind, FormattedValue lowValue, Node* highValue, LValue failCondition)
{
void terminate(ExitKind kind)
{
- speculate(kind, noValue(), 0, m_out.booleanTrue);
+ speculate(kind, noValue(), nullptr, m_out.booleanTrue);
+ didAlreadyTerminate();
+ }
+
+ void didAlreadyTerminate()
+ {
+ m_state.setIsValid(false);
}
void typeCheck(
ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || (edge.useKind() == Int32Use || edge.useKind() == KnownInt32Use));
if (edge->hasConstant()) {
- JSValue value = m_graph.valueOfJSConstant(edge.node());
+ JSValue value = edge->asJSValue();
if (!value.isInt32()) {
terminate(Uncountable);
return m_out.int32Zero;
return result;
}
- RELEASE_ASSERT(!(m_state.forNode(edge).m_type & SpecInt32));
+ DFG_ASSERT(m_graph, m_node, !(provenType(edge) & SpecInt32));
terminate(Uncountable);
return m_out.int32Zero;
}
enum Int52Kind { StrictInt52, Int52 };
LValue lowInt52(Edge edge, Int52Kind kind)
{
- RELEASE_ASSERT(edge.useKind() == Int52RepUse);
+ DFG_ASSERT(m_graph, m_node, edge.useKind() == Int52RepUse);
LoweredNodeValue value;
break;
}
- RELEASE_ASSERT(!m_state.forNode(edge).m_type);
+ DFG_ASSERT(m_graph, m_node, !provenType(edge));
terminate(Uncountable);
return m_out.int64Zero;
}
case StrictInt52:
return Int52;
}
- LOWERING_FAILED(m_node, "Bad use kind");
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
return Int52;
}
LValue lowCell(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
{
- ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || DFG::isCell(edge.useKind()));
+ DFG_ASSERT(m_graph, m_node, mode == ManualOperandSpeculation || DFG::isCell(edge.useKind()));
if (edge->op() == JSConstant) {
- JSValue value = m_graph.valueOfJSConstant(edge.node());
+ JSValue value = edge->asJSValue();
if (!value.isCell()) {
terminate(Uncountable);
return m_out.intPtrZero;
return uncheckedValue;
}
- RELEASE_ASSERT(!(m_state.forNode(edge).m_type & SpecCell));
+ DFG_ASSERT(m_graph, m_node, !(provenType(edge) & SpecCell));
terminate(Uncountable);
return m_out.intPtrZero;
}
ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == BooleanUse);
if (edge->hasConstant()) {
- JSValue value = m_graph.valueOfJSConstant(edge.node());
+ JSValue value = edge->asJSValue();
if (!value.isBoolean()) {
terminate(Uncountable);
return m_out.booleanFalse;
return result;
}
- RELEASE_ASSERT(!(m_state.forNode(edge).m_type & SpecBoolean));
+ DFG_ASSERT(m_graph, m_node, !(provenType(edge) & SpecBoolean));
terminate(Uncountable);
return m_out.booleanFalse;
}
LValue lowDouble(Edge edge)
{
- RELEASE_ASSERT(isDouble(edge.useKind()));
+ DFG_ASSERT(m_graph, m_node, isDouble(edge.useKind()));
LoweredNodeValue value = m_doubleValues.get(edge.node());
if (isValid(value))
return value.value();
-
- RELEASE_ASSERT(!m_state.forNode(edge).m_type);
+ DFG_ASSERT(m_graph, m_node, !provenType(edge));
terminate(Uncountable);
return m_out.doubleZero;
}
LValue lowJSValue(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
{
- ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == UntypedUse);
- RELEASE_ASSERT(!isDouble(edge.useKind()));
- RELEASE_ASSERT(edge.useKind() != Int52RepUse);
+ DFG_ASSERT(m_graph, m_node, mode == ManualOperandSpeculation || edge.useKind() == UntypedUse);
+ DFG_ASSERT(m_graph, m_node, !isDouble(edge.useKind()));
+ DFG_ASSERT(m_graph, m_node, edge.useKind() != Int52RepUse);
if (edge->hasConstant())
- return m_out.constInt64(JSValue::encode(m_graph.valueOfJSConstant(edge.node())));
-
+ return m_out.constInt64(JSValue::encode(edge->asJSValue()));
+
LoweredNodeValue value = m_jsValueValues.get(edge.node());
if (isValid(value))
return value.value();
return result;
}
- LOWERING_FAILED(m_node, "Corrupt array class");
+ DFG_CRASH(m_graph, m_node, "Value not defined");
return 0;
}
return m_out.aShr(value, m_out.constInt64(JSValue::int52ShiftAmount));
}
- LValue isNotInt32(LValue jsValue)
+ LValue isInt32(LValue jsValue, SpeculatedType type = SpecFullTop)
+ {
+ if (LValue proven = isProvenValue(type, SpecInt32))
+ return proven;
+ return m_out.aboveOrEqual(jsValue, m_tagTypeNumber);
+ }
+ LValue isNotInt32(LValue jsValue, SpeculatedType type = SpecFullTop)
{
+ if (LValue proven = isProvenValue(type, ~SpecInt32))
+ return proven;
return m_out.below(jsValue, m_tagTypeNumber);
}
LValue unboxInt32(LValue jsValue)
return m_out.add(m_out.zeroExt(value, m_out.int64), m_tagTypeNumber);
}
- LValue isCellOrMisc(LValue jsValue)
+ LValue isCellOrMisc(LValue jsValue, SpeculatedType type = SpecFullTop)
{
+ if (LValue proven = isProvenValue(type, SpecCell | SpecMisc))
+ return proven;
return m_out.testIsZero64(jsValue, m_tagTypeNumber);
}
- LValue isNotCellOrMisc(LValue jsValue)
+ LValue isNotCellOrMisc(LValue jsValue, SpeculatedType type = SpecFullTop)
{
+ if (LValue proven = isProvenValue(type, ~(SpecCell | SpecMisc)))
+ return proven;
return m_out.testNonZero64(jsValue, m_tagTypeNumber);
}
{
return m_out.sub(m_out.bitCast(doubleValue, m_out.int64), m_tagTypeNumber);
}
- LValue jsValueToDouble(Edge edge, LValue boxedValue)
- {
- LBasicBlock intCase = FTL_NEW_BLOCK(m_out, ("jsValueToDouble unboxing int case"));
- LBasicBlock doubleCase = FTL_NEW_BLOCK(m_out, ("jsValueToDouble unboxing double case"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("jsValueToDouble unboxing continuation"));
-
- LValue isNotInt32;
- if (!m_interpreter.needsTypeCheck(edge, SpecInt32))
- isNotInt32 = m_out.booleanFalse;
- else if (!m_interpreter.needsTypeCheck(edge, ~SpecInt32))
- isNotInt32 = m_out.booleanTrue;
- else
- isNotInt32 = this->isNotInt32(boxedValue);
- m_out.branch(isNotInt32, unsure(doubleCase), unsure(intCase));
-
- LBasicBlock lastNext = m_out.appendTo(intCase, doubleCase);
-
- ValueFromBlock intToDouble = m_out.anchor(
- m_out.intToDouble(unboxInt32(boxedValue)));
- m_out.jump(continuation);
-
- m_out.appendTo(doubleCase, continuation);
-
- FTL_TYPE_CHECK(
- jsValueValue(boxedValue), edge, SpecBytecodeNumber, isCellOrMisc(boxedValue));
-
- ValueFromBlock unboxedDouble = m_out.anchor(unboxDouble(boxedValue));
- m_out.jump(continuation);
-
- m_out.appendTo(continuation, lastNext);
-
- return m_out.phi(m_out.doubleType, intToDouble, unboxedDouble);
- }
LValue jsValueToStrictInt52(Edge edge, LValue boxedValue)
{
return possibleResult;
}
-
- LValue isNumber(LValue jsValue)
+
+ LValue convertDoubleToInt32(LValue value, bool shouldCheckNegativeZero)
+ {
+ LValue integerValue = m_out.fpToInt32(value);
+ LValue integerValueConvertedToDouble = m_out.intToDouble(integerValue);
+ LValue valueNotConvertibleToInteger = m_out.doubleNotEqualOrUnordered(value, integerValueConvertedToDouble);
+ speculate(Overflow, FormattedValue(ValueFormatDouble, value), m_node, valueNotConvertibleToInteger);
+
+ if (shouldCheckNegativeZero) {
+ LBasicBlock valueIsZero = FTL_NEW_BLOCK(m_out, ("ConvertDoubleToInt32 on zero"));
+ LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("ConvertDoubleToInt32 continuation"));
+ m_out.branch(m_out.isZero32(integerValue), unsure(valueIsZero), unsure(continuation));
+
+ LBasicBlock lastNext = m_out.appendTo(valueIsZero, continuation);
+
+ LValue doubleBitcastToInt64 = m_out.bitCast(value, m_out.int64);
+ LValue signBitSet = m_out.lessThan(doubleBitcastToInt64, m_out.constInt64(0));
+
+ speculate(NegativeZero, FormattedValue(ValueFormatDouble, value), m_node, signBitSet);
+ m_out.jump(continuation);
+ m_out.appendTo(continuation, lastNext);
+ }
+ return integerValue;
+ }
+
+ LValue isNumber(LValue jsValue, SpeculatedType type = SpecFullTop)
{
+ if (LValue proven = isProvenValue(type, SpecFullNumber))
+ return proven;
return isNotCellOrMisc(jsValue);
}
- LValue isNotNumber(LValue jsValue)
+ LValue isNotNumber(LValue jsValue, SpeculatedType type = SpecFullTop)
{
+ if (LValue proven = isProvenValue(type, ~SpecFullNumber))
+ return proven;
return isCellOrMisc(jsValue);
}
- LValue isNotCell(LValue jsValue)
+ LValue isNotCell(LValue jsValue, SpeculatedType type = SpecFullTop)
{
+ if (LValue proven = isProvenValue(type, ~SpecCell))
+ return proven;
return m_out.testNonZero64(jsValue, m_tagMask);
}
- LValue isCell(LValue jsValue)
+ LValue isCell(LValue jsValue, SpeculatedType type = SpecFullTop)
{
+ if (LValue proven = isProvenValue(type, SpecCell))
+ return proven;
return m_out.testIsZero64(jsValue, m_tagMask);
}
- LValue isNotMisc(LValue value)
+ LValue isNotMisc(LValue value, SpeculatedType type = SpecFullTop)
{
+ if (LValue proven = isProvenValue(type, ~SpecMisc))
+ return proven;
return m_out.above(value, m_out.constInt64(TagBitTypeOther | TagBitBool | TagBitUndefined));
}
- LValue isMisc(LValue value)
+ LValue isMisc(LValue value, SpeculatedType type = SpecFullTop)
{
+ if (LValue proven = isProvenValue(type, SpecMisc))
+ return proven;
return m_out.bitNot(isNotMisc(value));
}
- LValue isNotBoolean(LValue jsValue)
+ LValue isNotBoolean(LValue jsValue, SpeculatedType type = SpecFullTop)
{
+ if (LValue proven = isProvenValue(type, ~SpecBoolean))
+ return proven;
return m_out.testNonZero64(
m_out.bitXor(jsValue, m_out.constInt64(ValueFalse)),
m_out.constInt64(~1));
}
- LValue isBoolean(LValue jsValue)
+ LValue isBoolean(LValue jsValue, SpeculatedType type = SpecFullTop)
{
+ if (LValue proven = isProvenValue(type, SpecBoolean))
+ return proven;
return m_out.bitNot(isNotBoolean(jsValue));
}
LValue unboxBoolean(LValue jsValue)
value, m_out.constInt64(ValueTrue), m_out.constInt64(ValueFalse));
}
- LValue isNotOther(LValue value)
+ LValue isNotOther(LValue value, SpeculatedType type = SpecFullTop)
{
+ if (LValue proven = isProvenValue(type, ~SpecOther))
+ return proven;
return m_out.notEqual(
m_out.bitAnd(value, m_out.constInt64(~TagBitUndefined)),
m_out.constInt64(ValueNull));
}
- LValue isOther(LValue value)
+ LValue isOther(LValue value, SpeculatedType type = SpecFullTop)
{
+ if (LValue proven = isProvenValue(type, SpecOther))
+ return proven;
return m_out.equal(
m_out.bitAnd(value, m_out.constInt64(~TagBitUndefined)),
m_out.constInt64(ValueNull));
}
+
+ LValue isProvenValue(SpeculatedType provenType, SpeculatedType wantedType)
+ {
+ if (!(provenType & ~wantedType))
+ return m_out.booleanTrue;
+ if (!(provenType & wantedType))
+ return m_out.booleanFalse;
+ return nullptr;
+ }
void speculate(Edge edge)
{
case ObjectUse:
speculateObject(edge);
break;
+ case FunctionUse:
+ speculateFunction(edge);
+ break;
case ObjectOrOtherUse:
speculateObjectOrOther(edge);
break;
case NumberUse:
speculateNumber(edge);
break;
+ case RealNumberUse:
+ speculateRealNumber(edge);
+ break;
case DoubleRepRealUse:
- speculateDoubleReal(edge);
+ speculateDoubleRepReal(edge);
break;
case DoubleRepMachineIntUse:
speculateDoubleRepMachineInt(edge);
speculateMisc(edge);
break;
default:
- LOWERING_FAILED(m_node, "Unsupported speculation use kind");
- return;
+ DFG_CRASH(m_graph, m_node, "Unsupported speculation use kind");
}
}
jsValueToStrictInt52(edge, lowJSValue(edge, ManualOperandSpeculation));
}
- LValue isObject(LValue cell)
+ LValue isObject(LValue cell, SpeculatedType type = SpecFullTop)
{
- return m_out.notEqual(
- m_out.load32(cell, m_heaps.JSCell_structureID),
- m_out.constInt32(vm().stringStructure->id()));
+ if (LValue proven = isProvenValue(type & SpecCell, SpecObject))
+ return proven;
+ return m_out.aboveOrEqual(
+ m_out.load8(cell, m_heaps.JSCell_typeInfoType),
+ m_out.constInt8(ObjectType));
}
-
- LValue isNotString(LValue cell)
+
+ LValue isNotObject(LValue cell, SpeculatedType type = SpecFullTop)
{
- return isObject(cell);
+ if (LValue proven = isProvenValue(type & SpecCell, ~SpecObject))
+ return proven;
+ return m_out.below(
+ m_out.load8(cell, m_heaps.JSCell_typeInfoType),
+ m_out.constInt8(ObjectType));
}
-
- LValue isString(LValue cell)
+
+ LValue isNotString(LValue cell, SpeculatedType type = SpecFullTop)
{
- return m_out.equal(
+ if (LValue proven = isProvenValue(type & SpecCell, ~SpecString))
+ return proven;
+ return m_out.notEqual(
m_out.load32(cell, m_heaps.JSCell_structureID),
m_out.constInt32(vm().stringStructure->id()));
}
- LValue isNotObject(LValue cell)
+ LValue isString(LValue cell, SpeculatedType type = SpecFullTop)
{
- return isString(cell);
+ if (LValue proven = isProvenValue(type & SpecCell, SpecString))
+ return proven;
+ return m_out.equal(
+ m_out.load32(cell, m_heaps.JSCell_structureID),
+ m_out.constInt32(vm().stringStructure->id()));
}
LValue isArrayType(LValue cell, ArrayMode arrayMode)
switch (arrayMode.arrayClass()) {
case Array::OriginalArray:
- LOWERING_FAILED(m_node, "Unexpected original array");
+ DFG_CRASH(m_graph, m_node, "Unexpected original array");
return 0;
case Array::Array:
m_out.constInt8(arrayMode.shapeMask()));
}
- LOWERING_FAILED(m_node, "Corrupt array class");
- return 0;
+ DFG_CRASH(m_graph, m_node, "Corrupt array class");
}
+ case Array::DirectArguments:
+ return m_out.equal(
+ m_out.load8(cell, m_heaps.JSCell_typeInfoType),
+ m_out.constInt8(DirectArgumentsType));
+
+ case Array::ScopedArguments:
+ return m_out.equal(
+ m_out.load8(cell, m_heaps.JSCell_typeInfoType),
+ m_out.constInt8(ScopedArgumentsType));
+
default:
return m_out.equal(
m_out.load8(cell, m_heaps.JSCell_typeInfoType),
}
}
+ LValue isFunction(LValue cell, SpeculatedType type = SpecFullTop)
+ {
+ if (LValue proven = isProvenValue(type & SpecCell, SpecFunction))
+ return proven;
+ return isType(cell, JSFunctionType);
+ }
+ LValue isNotFunction(LValue cell, SpeculatedType type = SpecFullTop)
+ {
+ if (LValue proven = isProvenValue(type & SpecCell, ~SpecFunction))
+ return proven;
+ return isNotType(cell, JSFunctionType);
+ }
+
+ LValue isExoticForTypeof(LValue cell, SpeculatedType type = SpecFullTop)
+ {
+ if (!(type & SpecObjectOther))
+ return m_out.booleanFalse;
+ return m_out.testNonZero8(
+ m_out.load8(cell, m_heaps.JSCell_typeInfoFlags),
+ m_out.constInt8(MasqueradesAsUndefined | TypeOfShouldCallGetCallData));
+ }
+
LValue isType(LValue cell, JSType type)
{
return m_out.equal(
speculateObject(edge, lowCell(edge));
}
+ void speculateFunction(Edge edge, LValue cell)
+ {
+ FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecFunction, isNotFunction(cell));
+ }
+
+ void speculateFunction(Edge edge)
+ {
+ speculateFunction(edge, lowCell(edge));
+ }
+
void speculateObjectOrOther(Edge edge)
{
if (!m_interpreter.needsTypeCheck(edge))
return;
- LValue value = lowJSValue(edge);
+ LValue value = lowJSValue(edge, ManualOperandSpeculation);
LBasicBlock cellCase = FTL_NEW_BLOCK(m_out, ("speculateObjectOrOther cell case"));
LBasicBlock primitiveCase = FTL_NEW_BLOCK(m_out, ("speculateObjectOrOther primitive case"));
LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("speculateObjectOrOther continuation"));
- m_out.branch(isNotCell(value), unsure(primitiveCase), unsure(cellCase));
+ m_out.branch(isNotCell(value, provenType(edge)), unsure(primitiveCase), unsure(cellCase));
LBasicBlock lastNext = m_out.appendTo(cellCase, primitiveCase);
Structure* stringObjectStructure =
m_graph.globalObjectFor(m_node->origin.semantic)->stringObjectStructure();
- if (m_state.forNode(edge).m_currentKnownStructure.isSubsetOf(StructureSet(stringObjectStructure)))
+ if (abstractStructure(edge).isSubsetOf(StructureSet(stringObjectStructure)))
return;
speculate(
NotStringObject, noValue(), 0,
- m_out.notEqual(structureID, weakStructure(stringObjectStructure)));
+ m_out.notEqual(structureID, weakStructureID(stringObjectStructure)));
}
void speculateNonNullObject(Edge edge, LValue cell)
{
- FTL_TYPE_CHECK(
- jsValueValue(cell), edge, SpecObject,
- m_out.equal(
- m_out.load32(cell, m_heaps.JSCell_structureID),
- m_out.constInt32(vm().stringStructure->id())));
+ FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecObject, isNotObject(cell));
if (masqueradesAsUndefinedWatchpointIsStillValid())
return;
FTL_TYPE_CHECK(jsValueValue(value), edge, SpecBytecodeNumber, isNotNumber(value));
}
- void speculateDoubleReal(Edge edge)
+ void speculateRealNumber(Edge edge)
+ {
+ // Do an early return here because lowDouble() can create a lot of control flow.
+ if (!m_interpreter.needsTypeCheck(edge))
+ return;
+
+ LValue value = lowJSValue(edge, ManualOperandSpeculation);
+ LValue doubleValue = unboxDouble(value);
+
+ LBasicBlock intCase = FTL_NEW_BLOCK(m_out, ("speculateRealNumber int case"));
+ LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("speculateRealNumber continuation"));
+
+ m_out.branch(
+ m_out.doubleEqual(doubleValue, doubleValue),
+ usually(continuation), rarely(intCase));
+
+ LBasicBlock lastNext = m_out.appendTo(intCase, continuation);
+
+ typeCheck(
+ jsValueValue(value), m_node->child1(), SpecBytecodeRealNumber,
+ isNotInt32(value, provenType(m_node->child1()) & ~SpecFullDouble));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ }
+
+ void speculateDoubleRepReal(Edge edge)
{
// Do an early return here because lowDouble() can create a lot of control flow.
if (!m_interpreter.needsTypeCheck(edge))
LBasicBlock isStringCase = FTL_NEW_BLOCK(m_out, ("Speculate NotStringVar is string case"));
LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("Speculate NotStringVar continuation"));
- m_out.branch(isCell(value), unsure(isCellCase), unsure(continuation));
+ m_out.branch(isCell(value, provenType(edge)), unsure(isCellCase), unsure(continuation));
LBasicBlock lastNext = m_out.appendTo(isCellCase, isStringCase);
- m_out.branch(isString(value), unsure(isStringCase), unsure(continuation));
+ m_out.branch(isString(value, provenType(edge)), unsure(isStringCase), unsure(continuation));
m_out.appendTo(isStringCase, continuation);
speculateStringIdent(edge, value, m_out.loadPtr(value, m_heaps.JSString_value));
// Append to the write barrier buffer.
LBasicBlock lastNext = m_out.appendTo(isMarkedAndNotRemembered, bufferHasSpace);
- LValue currentBufferIndex = m_out.load32(m_out.absolute(&vm().heap.writeBarrierBuffer().m_currentIndex));
- LValue bufferCapacity = m_out.load32(m_out.absolute(&vm().heap.writeBarrierBuffer().m_capacity));
+ LValue currentBufferIndex = m_out.load32(m_out.absolute(vm().heap.writeBarrierBuffer().currentIndexAddress()));
+ LValue bufferCapacity = m_out.constInt32(vm().heap.writeBarrierBuffer().capacity());
m_out.branch(
m_out.lessThan(currentBufferIndex, bufferCapacity),
usually(bufferHasSpace), rarely(bufferIsFull));
// Buffer has space, store to it.
m_out.appendTo(bufferHasSpace, bufferIsFull);
- LValue writeBarrierBufferBase = m_out.loadPtr(m_out.absolute(&vm().heap.writeBarrierBuffer().m_buffer));
- m_out.storePtr(base, m_out.baseIndex(m_heaps.WriteBarrierBuffer_bufferContents, writeBarrierBufferBase, m_out.zeroExt(currentBufferIndex, m_out.intPtr), ScalePtr));
- m_out.store32(m_out.add(currentBufferIndex, m_out.constInt32(1)), m_out.absolute(&vm().heap.writeBarrierBuffer().m_currentIndex));
+ LValue writeBarrierBufferBase = m_out.constIntPtr(vm().heap.writeBarrierBuffer().buffer());
+ m_out.storePtr(base, m_out.baseIndex(m_heaps.WriteBarrierBuffer_bufferContents, writeBarrierBufferBase, m_out.zeroExtPtr(currentBufferIndex)));
+ m_out.store32(m_out.add(currentBufferIndex, m_out.constInt32(1)), m_out.absolute(vm().heap.writeBarrierBuffer().currentIndexAddress()));
m_out.jump(continuation);
// Buffer is out of space, flush it.
m_out.appendTo(bufferIsFull, continuation);
- vmCall(m_out.operation(operationFlushWriteBarrierBuffer), m_callFrame, base, NoExceptions);
+ vmCallNoExceptions(m_out.operation(operationFlushWriteBarrierBuffer), m_callFrame, base);
m_out.jump(continuation);
m_out.appendTo(continuation, lastNext);
#endif
}
- enum ExceptionCheckMode { NoExceptions, CheckExceptions };
-
- LValue vmCall(LValue function, ExceptionCheckMode mode = CheckExceptions)
- {
- callPreflight();
- LValue result = m_out.call(function);
- callCheck(mode);
- return result;
- }
- LValue vmCall(LValue function, LValue arg1, ExceptionCheckMode mode = CheckExceptions)
- {
- callPreflight();
- LValue result = m_out.call(function, arg1);
- callCheck(mode);
- return result;
- }
- LValue vmCall(LValue function, LValue arg1, LValue arg2, ExceptionCheckMode mode = CheckExceptions)
- {
- callPreflight();
- LValue result = m_out.call(function, arg1, arg2);
- callCheck(mode);
- return result;
- }
- LValue vmCall(LValue function, LValue arg1, LValue arg2, LValue arg3, ExceptionCheckMode mode = CheckExceptions)
+ template<typename... Args>
+ LValue vmCall(LValue function, Args... args)
{
callPreflight();
- LValue result = m_out.call(function, arg1, arg2, arg3);
- callCheck(mode);
+ LValue result = m_out.call(function, args...);
+ callCheck();
return result;
}
- LValue vmCall(LValue function, LValue arg1, LValue arg2, LValue arg3, LValue arg4, ExceptionCheckMode mode = CheckExceptions)
+
+ template<typename... Args>
+ LValue vmCallNoExceptions(LValue function, Args... args)
{
callPreflight();
- LValue result = m_out.call(function, arg1, arg2, arg3, arg4);
- callCheck(mode);
+ LValue result = m_out.call(function, args...);
return result;
}
-
+
void callPreflight(CodeOrigin codeOrigin)
{
m_out.store32(
callPreflight(m_node->origin.semantic);
}
- void callCheck(ExceptionCheckMode mode = CheckExceptions)
+ void callCheck()
{
- if (mode == NoExceptions)
- return;
-
if (Options::enableExceptionFuzz())
m_out.call(m_out.operation(operationExceptionFuzz));
LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("Exception check continuation"));
+ LValue exception = m_out.load64(m_out.absolute(vm().addressOfException()));
+
m_out.branch(
- m_out.notZero64(m_out.load64(m_out.absolute(vm().addressOfException()))),
- rarely(m_handleExceptions), usually(continuation));
+ m_out.notZero64(exception), rarely(m_handleExceptions), usually(continuation));
m_out.appendTo(continuation);
}
return m_blocks.get(block);
}
- void initializeOSRExitStateForBlock()
- {
- m_availability = m_highBlock->ssa->availabilityAtHead;
- }
-
void appendOSRExit(
ExitKind kind, FormattedValue lowValue, Node* highValue, LValue failCondition)
{
if (verboseCompilationEnabled()) {
- dataLog(" OSR exit #", m_ftlState.jitCode->osrExit.size(), " with availability: ", m_availability, "\n");
+ dataLog(" OSR exit #", m_ftlState.jitCode->osrExit.size(), " with availability: ", availabilityMap(), "\n");
if (!m_availableRecoveries.isEmpty())
dataLog(" Available recoveries: ", listDump(m_availableRecoveries), "\n");
}
+
+ if (doOSRExitFuzzing()) {
+ LValue numberOfFuzzChecks = m_out.add(
+ m_out.load32(m_out.absolute(&g_numberOfOSRExitFuzzChecks)),
+ m_out.int32One);
+
+ m_out.store32(numberOfFuzzChecks, m_out.absolute(&g_numberOfOSRExitFuzzChecks));
+
+ if (unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter()) {
+ failCondition = m_out.bitOr(
+ failCondition,
+ m_out.aboveOrEqual(numberOfFuzzChecks, m_out.constInt32(atOrAfter)));
+ }
+ if (unsigned at = Options::fireOSRExitFuzzAt()) {
+ failCondition = m_out.bitOr(
+ failCondition,
+ m_out.equal(numberOfFuzzChecks, m_out.constInt32(at)));
+ }
+ }
ASSERT(m_ftlState.jitCode->osrExit.size() == m_ftlState.finalizer->osrExit.size());
m_ftlState.jitCode->osrExit.append(OSRExit(
kind, lowValue.format(), m_graph.methodOfGettingAValueProfileFor(highValue),
m_codeOriginForExitTarget, m_codeOriginForExitProfile,
- m_availability.numberOfArguments(), m_availability.numberOfLocals()));
+ availabilityMap().m_locals.numberOfArguments(),
+ availabilityMap().m_locals.numberOfLocals()));
m_ftlState.finalizer->osrExit.append(OSRExitCompilationInfo());
OSRExit& exit = m_ftlState.jitCode->osrExit.last();
- LBasicBlock lastNext = 0;
- LBasicBlock continuation = 0;
+ LBasicBlock lastNext = nullptr;
+ LBasicBlock continuation = nullptr;
LBasicBlock failCase = FTL_NEW_BLOCK(m_out, ("OSR exit failCase for ", m_node));
continuation = FTL_NEW_BLOCK(m_out, ("OSR exit continuation for ", m_node));
if (!!lowValue)
arguments.append(lowValue.value());
+ AvailabilityMap availabilityMap = this->availabilityMap();
+ availabilityMap.pruneByLiveness(m_graph, codeOrigin);
+
+ HashMap<Node*, ExitTimeObjectMaterialization*> map;
+ availabilityMap.forEachAvailability(
+ [&] (Availability availability) {
+ if (!availability.shouldUseNode())
+ return;
+
+ Node* node = availability.node();
+ if (!node->isPhantomAllocation())
+ return;
+
+ auto result = map.add(node, nullptr);
+ if (result.isNewEntry) {
+ result.iterator->value =
+ exit.m_materializations.add(node->op(), node->origin.semantic);
+ }
+ });
+
for (unsigned i = 0; i < exit.m_values.size(); ++i) {
int operand = exit.m_values.operandForIndex(i);
- bool isLive = m_graph.isLiveInBytecode(VirtualRegister(operand), codeOrigin);
- if (!isLive) {
- exit.m_values[i] = ExitValue::dead();
- continue;
- }
- Availability availability = m_availability[i];
- FlushedAt flush = availability.flushedAt();
- switch (flush.format()) {
- case DeadFlush:
- case ConflictingFlush:
- if (availability.hasNode()) {
- addExitArgumentForNode(exit, arguments, i, availability.node());
- break;
- }
-
- if (Options::validateFTLOSRExitLiveness()) {
- dataLog("Expected r", operand, " to be available but it wasn't.\n");
- RELEASE_ASSERT_NOT_REACHED();
- }
-
- // This means that the DFG's DCE proved that the value is dead in bytecode
- // even though the bytecode liveness analysis thinks it's live. This is
- // acceptable since the DFG's DCE is by design more aggressive while still
- // being sound.
- exit.m_values[i] = ExitValue::dead();
- break;
-
- case FlushedJSValue:
- case FlushedCell:
- case FlushedBoolean:
- exit.m_values[i] = ExitValue::inJSStack(flush.virtualRegister());
- break;
-
- case FlushedInt32:
- exit.m_values[i] = ExitValue::inJSStackAsInt32(flush.virtualRegister());
- break;
-
- case FlushedInt52:
- exit.m_values[i] = ExitValue::inJSStackAsInt52(flush.virtualRegister());
- break;
-
- case FlushedDouble:
- exit.m_values[i] = ExitValue::inJSStackAsDouble(flush.virtualRegister());
- break;
-
- case FlushedArguments:
- exit.m_values[i] = ExitValue::argumentsObjectThatWasNotCreated();
- break;
+ Availability availability = availabilityMap.m_locals[i];
+
+ if (Options::validateFTLOSRExitLiveness()) {
+ DFG_ASSERT(
+ m_graph, m_node,
+ (!(availability.isDead() && m_graph.isLiveInBytecode(VirtualRegister(operand), codeOrigin))) || m_graph.m_plan.mode == FTLForOSREntryMode);
}
+
+ exit.m_values[i] = exitValueForAvailability(arguments, map, availability);
}
- if (verboseCompilationEnabled())
+ for (auto heapPair : availabilityMap.m_heap) {
+ Node* node = heapPair.key.base();
+ ExitTimeObjectMaterialization* materialization = map.get(node);
+ materialization->add(
+ heapPair.key.descriptor(),
+ exitValueForAvailability(arguments, map, heapPair.value));
+ }
+
+ if (verboseCompilationEnabled()) {
dataLog(" Exit values: ", exit.m_values, "\n");
+ if (!exit.m_materializations.isEmpty()) {
+ dataLog(" Materializations: \n");
+ for (ExitTimeObjectMaterialization* materialization : exit.m_materializations)
+ dataLog(" ", pointerDump(materialization), "\n");
+ }
+ }
}
void callStackmap(OSRExit& exit, ExitArgumentList& arguments)
m_out.call(m_out.stackmapIntrinsic(), arguments);
}
- void addExitArgumentForNode(
- OSRExit& exit, ExitArgumentList& arguments, unsigned index, Node* node)
+ ExitValue exitValueForAvailability(
+ ExitArgumentList& arguments, const HashMap<Node*, ExitTimeObjectMaterialization*>& map,
+ Availability availability)
+ {
+ FlushedAt flush = availability.flushedAt();
+ switch (flush.format()) {
+ case DeadFlush:
+ case ConflictingFlush:
+ if (availability.hasNode())
+ return exitValueForNode(arguments, map, availability.node());
+
+ // This means that the value is dead. It could be dead in bytecode or it could have
+ // been killed by our DCE, which can sometimes kill things even if they were live in
+ // bytecode.
+ return ExitValue::dead();
+
+ case FlushedJSValue:
+ case FlushedCell:
+ case FlushedBoolean:
+ return ExitValue::inJSStack(flush.virtualRegister());
+
+ case FlushedInt32:
+ return ExitValue::inJSStackAsInt32(flush.virtualRegister());
+
+ case FlushedInt52:
+ return ExitValue::inJSStackAsInt52(flush.virtualRegister());
+
+ case FlushedDouble:
+ return ExitValue::inJSStackAsDouble(flush.virtualRegister());
+ }
+
+ DFG_CRASH(m_graph, m_node, "Invalid flush format");
+ return ExitValue::dead();
+ }
+
+ ExitValue exitValueForNode(
+ ExitArgumentList& arguments, const HashMap<Node*, ExitTimeObjectMaterialization*>& map,
+ Node* node)
{
ASSERT(node->shouldGenerate());
ASSERT(node->hasResult());
- if (tryToSetConstantExitArgument(exit, index, node))
- return;
+ if (node) {
+ switch (node->op()) {
+ case BottomValue:
+ // This might arise in object materializations. I actually doubt that it would,
+ // but it seems worthwhile to be conservative.
+ return ExitValue::dead();
+
+ case JSConstant:
+ case Int52Constant:
+ case DoubleConstant:
+ return ExitValue::constant(node->asJSValue());
+
+ default:
+ if (node->isPhantomAllocation())
+ return ExitValue::materializeNewObject(map.get(node));
+ break;
+ }
+ }
for (unsigned i = 0; i < m_availableRecoveries.size(); ++i) {
AvailableRecovery recovery = m_availableRecoveries[i];
if (recovery.node() != node)
continue;
- exit.m_values[index] = ExitValue::recovery(
+ ExitValue result = ExitValue::recovery(
recovery.opcode(), arguments.size(), arguments.size() + 1,
recovery.format());
arguments.append(recovery.left());
arguments.append(recovery.right());
- return;
+ return result;
}
LoweredNodeValue value = m_int32Values.get(node);
- if (isValid(value)) {
- addExitArgument(exit, arguments, index, ValueFormatInt32, value.value());
- return;
- }
+ if (isValid(value))
+ return exitArgument(arguments, ValueFormatInt32, value.value());
value = m_int52Values.get(node);
- if (isValid(value)) {
- addExitArgument(exit, arguments, index, ValueFormatInt52, value.value());
- return;
- }
+ if (isValid(value))
+ return exitArgument(arguments, ValueFormatInt52, value.value());
value = m_strictInt52Values.get(node);
- if (isValid(value)) {
- addExitArgument(exit, arguments, index, ValueFormatStrictInt52, value.value());
- return;
- }
+ if (isValid(value))
+ return exitArgument(arguments, ValueFormatStrictInt52, value.value());
value = m_booleanValues.get(node);
if (isValid(value)) {
LValue valueToPass = m_out.zeroExt(value.value(), m_out.int32);
- addExitArgument(exit, arguments, index, ValueFormatBoolean, valueToPass);
- return;
+ return exitArgument(arguments, ValueFormatBoolean, valueToPass);
}
value = m_jsValueValues.get(node);
- if (isValid(value)) {
- addExitArgument(exit, arguments, index, ValueFormatJSValue, value.value());
- return;
- }
+ if (isValid(value))
+ return exitArgument(arguments, ValueFormatJSValue, value.value());
value = m_doubleValues.get(node);
- if (isValid(value)) {
- addExitArgument(exit, arguments, index, ValueFormatDouble, value.value());
- return;
- }
+ if (isValid(value))
+ return exitArgument(arguments, ValueFormatDouble, value.value());
- startCrashing();
- dataLog("Cannot find value for node: ", node, " while compiling exit at ", exit.m_codeOrigin, " in node ", m_node, "\n");
- m_graph.dump();
- RELEASE_ASSERT_NOT_REACHED();
- }
-
- bool tryToSetConstantExitArgument(OSRExit& exit, unsigned index, Node* node)
- {
- if (!node)
- return false;
-
- switch (node->op()) {
- case JSConstant:
- case Int52Constant:
- case DoubleConstant:
- case WeakJSConstant:
- exit.m_values[index] = ExitValue::constant(m_graph.valueOfJSConstant(node));
- return true;
- case PhantomArguments:
- exit.m_values[index] = ExitValue::argumentsObjectThatWasNotCreated();
- return true;
- default:
- return false;
- }
+ DFG_CRASH(m_graph, m_node, toCString("Cannot find value for node: ", node).data());
+ return ExitValue::dead();
}
- void addExitArgument(
- OSRExit& exit, ExitArgumentList& arguments, unsigned index, ValueFormat format,
- LValue value)
+ ExitValue exitArgument(ExitArgumentList& arguments, ValueFormat format, LValue value)
{
- exit.m_values[index] = ExitValue::exitArgument(ExitArgument(format, arguments.size()));
+ ExitValue result = ExitValue::exitArgument(ExitArgument(format, arguments.size()));
arguments.append(value);
+ return result;
}
bool doesKill(Edge edge)
return;
}
- LOWERING_FAILED(m_node, "Corrupt int52 kind");
+ DFG_CRASH(m_graph, m_node, "Corrupt int52 kind");
}
void setJSValue(Node* node, LValue value)
{
LValue tableIndex = m_out.load32(value, m_heaps.JSCell_structureID);
LValue tableBase = m_out.loadPtr(
m_out.absolute(vm().heap.structureIDTable().base()));
- LValue pointerIntoTable = m_out.baseIndex(
- tableBase, m_out.zeroExt(tableIndex, m_out.intPtr), ScaleEight);
- return m_out.loadPtr(TypedPointer(m_heaps.structureTable, pointerIntoTable));
+ TypedPointer address = m_out.baseIndex(
+ m_heaps.structureTable, tableBase, m_out.zeroExtPtr(tableIndex));
+ return m_out.loadPtr(address);
}
LValue weakPointer(JSCell* pointer)
return m_out.constIntPtr(pointer);
}
- LValue weakStructure(Structure* structure)
+ LValue weakStructureID(Structure* structure)
{
addWeakReference(structure);
return m_out.constInt32(structure->id());
}
+ LValue weakStructure(Structure* structure)
+ {
+ return weakPointer(structure);
+ }
+
TypedPointer addressFor(LValue base, int operand, ptrdiff_t offset = 0)
{
return m_out.address(base, m_heaps.variables[operand], offset);
return addressFor(operand, TagOffset);
}
- NO_RETURN_DUE_TO_ASSERT void loweringFailed(Node* node, const char* file, int line, const char* function, const char* assertion)
+ AbstractValue abstractValue(Node* node)
{
- if (!ASSERT_DISABLED) {
- dataLog("FTL ASSERTION FAILED: ", assertion, "\n");
- dataLog(file, "(", line, ") : ", function, "\n");
- dataLog("While handling node ", node, "\n");
- RELEASE_ASSERT_NOT_REACHED();
- }
-
- m_loweringSucceeded = false;
+ return m_state.forNode(node);
+ }
+ AbstractValue abstractValue(Edge edge)
+ {
+ return abstractValue(edge.node());
+ }
+
+ SpeculatedType provenType(Node* node)
+ {
+ return abstractValue(node).m_type;
+ }
+ SpeculatedType provenType(Edge edge)
+ {
+ return provenType(edge.node());
+ }
+
+ JSValue provenValue(Node* node)
+ {
+ return abstractValue(node).m_value;
+ }
+ JSValue provenValue(Edge edge)
+ {
+ return provenValue(edge.node());
+ }
+
+ StructureAbstractValue abstractStructure(Node* node)
+ {
+ return abstractValue(node).m_structure;
+ }
+ StructureAbstractValue abstractStructure(Edge edge)
+ {
+ return abstractStructure(edge.node());
+ }
+
+ void crash()
+ {
+ crash(m_highBlock->index, m_node->index());
+ }
+ void crash(BlockIndex blockIndex, unsigned nodeIndex)
+ {
+#if ASSERT_DISABLED
+ m_out.call(m_out.operation(ftlUnreachable));
+ UNUSED_PARAM(blockIndex);
+ UNUSED_PARAM(nodeIndex);
+#else
+ m_out.call(
+ m_out.intToPtr(
+ m_out.constIntPtr(ftlUnreachable),
+ pointerType(
+ functionType(
+ m_out.voidType, m_out.intPtr, m_out.int32, m_out.int32))),
+ m_out.constIntPtr(codeBlock()), m_out.constInt32(blockIndex),
+ m_out.constInt32(nodeIndex));
+#endif
+ m_out.unreachable();
}
+ AvailabilityMap& availabilityMap() { return m_availabilityCalculator.m_availability; }
+
VM& vm() { return m_graph.m_vm; }
CodeBlock* codeBlock() { return m_graph.m_codeBlock; }
Graph& m_graph;
State& m_ftlState;
- bool m_loweringSucceeded;
AbstractHeapRepository m_heaps;
Output m_out;
LBasicBlock m_handleExceptions;
HashMap<BasicBlock*, LBasicBlock> m_blocks;
+ LValue m_execState;
+ LValue m_execStorage;
LValue m_callFrame;
LValue m_captured;
LValue m_tagTypeNumber;
HashMap<Node*, LoweredNodeValue> m_storageValues;
HashMap<Node*, LoweredNodeValue> m_doubleValues;
+ // This is a bit of a hack. It prevents LLVM from having to do CSE on loading of arguments.
+ // It's nice to have these optimizations on our end because we can guarantee them a bit better.
+ // Probably also saves LLVM compile time.
+ HashMap<Node*, LValue> m_loadedArgumentValues;
+
HashMap<Node*, LValue> m_phis;
- Operands<Availability> m_availability;
+ LocalOSRAvailabilityCalculator m_availabilityCalculator;
Vector<AvailableRecovery, 3> m_availableRecoveries;
Node* m_node;
uint32_t m_stackmapIDs;
+ unsigned m_tbaaKind;
+ unsigned m_tbaaStructKind;
};
-bool lowerDFGToLLVM(State& state)
+} // anonymous namespace
+
+void lowerDFGToLLVM(State& state)
{
LowerDFGToLLVM lowering(state);
- return lowering.lower();
+ lowering.lower();
}
} } // namespace JSC::FTL