+bool ByteCodeParser::handleTypedArrayConstructor(
+ int resultOperand, InternalFunction* function, int registerOffset,
+ int argumentCountIncludingThis, TypedArrayType type)
+{
+ if (!isTypedView(type))
+ return false;
+
+ if (function->classInfo() != constructorClassInfoForType(type))
+ return false;
+
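+    // The NewTypedArray node we emit below bakes in assumptions tied to the
+    // compiling code block's global object, so only inline when the
+    // constructor belongs to that same global object.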
+ if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
+ return false;
+
+ // We only have an intrinsic for the case where you say:
+ //
+ // new FooArray(blah);
+ //
+ // Of course, 'blah' could be any of the following:
+ //
+ // - Integer, indicating that you want to allocate an array of that length.
+ // This is the thing we're hoping for, and what we can actually do meaningful
+ // optimizations for.
+ //
+ // - Array buffer, indicating that you want to create a view onto that _entire_
+ // buffer.
+ //
+ // - Non-buffer object, indicating that you want to create a copy of that
+ // object by pretending that it quacks like an array.
+ //
+ // - Anything else, indicating that you want to have an exception thrown at
+ // you.
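+    //
+    // For concreteness, a few illustrative examples (not part of this patch),
+    // using Int8Array:
+    //
+    //     new Int8Array(100);        // an integer length
+    //     new Int8Array(buffer);     // a view onto an entire ArrayBuffer
+    //     new Int8Array([1, 2, 3]);  // a copy of an array-like object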
+ //
+ // The intrinsic, NewTypedArray, will behave as if it could do any of these
+ // things up until we do Fixup. Thereafter, if child1 (i.e. 'blah') is
+ // predicted Int32, then we lock it in as a normal typed array allocation.
+ // Otherwise, NewTypedArray turns into a totally opaque function call that
+ // may clobber the world - by virtue of it accessing properties on what could
+ // be an object.
+ //
+ // Note that although the generic form of NewTypedArray sounds sort of awful,
+ // it is actually quite likely to be more efficient than a fully generic
+ // Construct. So, we might want to think about making NewTypedArray variadic,
+ // or else making Construct not super slow.
+
+ if (argumentCountIncludingThis != 2)
+ return false;
+
+ set(VirtualRegister(resultOperand),
+ addToGraph(NewTypedArray, OpInfo(type), get(virtualRegisterForArgument(1, registerOffset))));
+ return true;
+}
+
+bool ByteCodeParser::handleConstantInternalFunction(
+ int resultOperand, InternalFunction* function, int registerOffset,
+ int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind kind)
+{
+ // If we ever find that we have a lot of internal functions that we specialize for,
+ // then we should probably have some sort of hashtable dispatch, or maybe even
+ // dispatch straight through the MethodTable of the InternalFunction. But for now,
+ // it seems that this case is hit infrequently enough, and the number of functions
+ // we know about is small enough, that having just a linear cascade of if statements
+ // is good enough.
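+    //
+    // For illustration only, such a dispatch might look something like this,
+    // where m_internalFunctionHandlers is a hypothetical HashMap from
+    // ClassInfo* to a handler function, not part of this patch:
+    //
+    //     if (auto handler = m_internalFunctionHandlers.get(function->classInfo()))
+    //         return handler(this, resultOperand, function, registerOffset,
+    //             argumentCountIncludingThis, prediction, kind);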
+
+ UNUSED_PARAM(prediction); // Remove this once we do more things.
+
+ if (function->classInfo() == ArrayConstructor::info()) {
+ if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
+ return false;
+
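+        // With exactly one argument we emit NewArrayWithSize (the
+        // new Array(length) form); otherwise the arguments, possibly none,
+        // become the elements of a variadic NewArray.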
+ if (argumentCountIncludingThis == 2) {
+ set(VirtualRegister(resultOperand),
+ addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(virtualRegisterForArgument(1, registerOffset))));
+ return true;
+ }
+
+ for (int i = 1; i < argumentCountIncludingThis; ++i)
+ addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
+ set(VirtualRegister(resultOperand),
+ addToGraph(Node::VarArg, NewArray, OpInfo(ArrayWithUndecided), OpInfo(0)));
+ return true;
+ }
+
+ if (function->classInfo() == StringConstructor::info()) {
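+        // Called as String(x) this evaluates to ToString(x), or the empty
+        // string with no arguments; new String(x) additionally wraps the
+        // result in a StringObject.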
+ Node* result;
+
+ if (argumentCountIncludingThis <= 1)
+ result = cellConstant(m_vm->smallStrings.emptyString());
+ else
+ result = addToGraph(ToString, get(virtualRegisterForArgument(1, registerOffset)));
+
+ if (kind == CodeForConstruct)
+ result = addToGraph(NewStringObject, OpInfo(function->globalObject()->stringObjectStructure()), result);
+
+ set(VirtualRegister(resultOperand), result);
+ return true;
+ }
+
+ for (unsigned typeIndex = 0; typeIndex < NUMBER_OF_TYPED_ARRAY_TYPES; ++typeIndex) {
+ bool result = handleTypedArrayConstructor(
+ resultOperand, function, registerOffset, argumentCountIncludingThis,
+ indexToTypedArrayType(typeIndex));
+ if (result)
+ return true;
+ }
+
+ return false;
+}
+
+Node* ByteCodeParser::handleGetByOffset(SpeculatedType prediction, Node* base, unsigned identifierNumber, PropertyOffset offset)
+{
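+    // Inline properties live directly in the object cell, so the base itself
+    // serves as the property storage; out-of-line properties live in the
+    // butterfly, which has to be loaded first.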
+ Node* propertyStorage;
+ if (isInlineOffset(offset))
+ propertyStorage = base;
+ else
+ propertyStorage = addToGraph(GetButterfly, base);
+ Node* getByOffset = addToGraph(GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction), propertyStorage, base);
+
+ StorageAccessData storageAccessData;
+ storageAccessData.offset = offset;
+ storageAccessData.identifierNumber = identifierNumber;
+ m_graph.m_storageAccessData.append(storageAccessData);
+
+ return getByOffset;
+}
+
+void ByteCodeParser::handleGetByOffset(
+ int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber,
+ PropertyOffset offset)
+{
+ set(VirtualRegister(destinationOperand), handleGetByOffset(prediction, base, identifierNumber, offset));
+}
+
+Node* ByteCodeParser::handlePutByOffset(Node* base, unsigned identifier, PropertyOffset offset, Node* value)
+{
+ Node* propertyStorage;
+ if (isInlineOffset(offset))
+ propertyStorage = base;
+ else
+ propertyStorage = addToGraph(GetButterfly, base);
+ Node* result = addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), propertyStorage, base, value);
+
+ StorageAccessData storageAccessData;
+ storageAccessData.offset = offset;
+ storageAccessData.identifierNumber = identifier;
+ m_graph.m_storageAccessData.append(storageAccessData);
+
+ return result;
+}
+
+Node* ByteCodeParser::emitPrototypeChecks(
+ Structure* structure, IntendedStructureChain* chain)
+{
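+    // Walk the prototype chain, pinning each prototype as a cell constant and
+    // checking that it still has the expected structure. The last prototype
+    // checked is returned so that the caller can load from it.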
+ Node* base = 0;
+ m_graph.chains().addLazily(chain);
+ Structure* currentStructure = structure;
+ JSObject* currentObject = 0;
+ for (unsigned i = 0; i < chain->size(); ++i) {
+ currentObject = asObject(currentStructure->prototypeForLookup(m_inlineStackTop->m_codeBlock));
+ currentStructure = chain->at(i);
+ base = cellConstantWithStructureCheck(currentObject, currentStructure);
+ }
+ return base;
+}
+
+void ByteCodeParser::handleGetById(
+ int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber,
+ const GetByIdStatus& getByIdStatus)
+{
+ if (!getByIdStatus.isSimple() || !Options::enableAccessInlining()) {
+ set(VirtualRegister(destinationOperand),
+ addToGraph(
+ getByIdStatus.makesCalls() ? GetByIdFlush : GetById,
+ OpInfo(identifierNumber), OpInfo(prediction), base));
+ return;
+ }
+
+ if (getByIdStatus.numVariants() > 1) {
+ if (!isFTL(m_graph.m_plan.mode) || !Options::enablePolymorphicAccessInlining()) {
+ set(VirtualRegister(destinationOperand),
+ addToGraph(GetById, OpInfo(identifierNumber), OpInfo(prediction), base));
+ return;
+ }
+
+ if (m_graph.compilation())
+ m_graph.compilation()->noticeInlinedGetById();
+
+        // 1) Emit prototype structure checks for all chains. This may not be
+        //    optimal if there is some rarely executed case in the chain that
+        //    requires a lot of checks and those checks are not watchpointable.
+ for (unsigned variantIndex = getByIdStatus.numVariants(); variantIndex--;) {
+ if (getByIdStatus[variantIndex].chain()) {
+ emitPrototypeChecks(
+ getByIdStatus[variantIndex].structureSet().singletonStructure(),
+ getByIdStatus[variantIndex].chain());
+ }
+ }
+
+        // 2) Emit a MultiGetByOffset.
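+        //    MultiGetByOffset dispatches on the base's structure at run time
+        //    and loads from the offset of whichever variant matches.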
+ MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add();
+ data->variants = getByIdStatus.variants();
+ data->identifierNumber = identifierNumber;
+ set(VirtualRegister(destinationOperand),
+ addToGraph(MultiGetByOffset, OpInfo(data), OpInfo(prediction), base));
+ return;
+ }
+
+ ASSERT(getByIdStatus.numVariants() == 1);
+ GetByIdVariant variant = getByIdStatus[0];
+
+ if (m_graph.compilation())
+ m_graph.compilation()->noticeInlinedGetById();
+
+ Node* originalBaseForBaselineJIT = base;
+
+ addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structureSet())), base);
+
+ if (variant.chain()) {
+ base = emitPrototypeChecks(
+ variant.structureSet().singletonStructure(), variant.chain());
+ }
+
+ // Unless we want bugs like https://bugs.webkit.org/show_bug.cgi?id=88783, we need to
+ // ensure that the base of the original get_by_id is kept alive until we're done with
+ // all of the speculations. We only insert the Phantom if there had been a CheckStructure
+ // on something other than the base following the CheckStructure on base, or if the
+ // access was compiled to a WeakJSConstant specific value, in which case we might not
+ // have any explicit use of the base at all.
+ if (variant.specificValue() || originalBaseForBaselineJIT != base)
+ addToGraph(Phantom, originalBaseForBaselineJIT);
+
+ if (variant.specificValue()) {
+ ASSERT(variant.specificValue().isCell());
+
+ set(VirtualRegister(destinationOperand), cellConstant(variant.specificValue().asCell()));
+ return;
+ }
+
+ handleGetByOffset(
+ destinationOperand, prediction, base, identifierNumber, variant.offset());
+}
+
+void ByteCodeParser::emitPutById(
+ Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus& putByIdStatus, bool isDirect)
+{
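+    // Direct puts (put_by_id_direct, used for things like object literal
+    // initialization) define the property on the base itself and never consult
+    // the prototype chain, so they get a dedicated node.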
+ if (isDirect)
+ addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
+ else
+ addToGraph(putByIdStatus.makesCalls() ? PutByIdFlush : PutById, OpInfo(identifierNumber), base, value);
+}
+
+void ByteCodeParser::handlePutById(
+ Node* base, unsigned identifierNumber, Node* value,
+ const PutByIdStatus& putByIdStatus, bool isDirect)
+{
+ if (!putByIdStatus.isSimple() || !Options::enableAccessInlining()) {
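+        // If the status was never set, this put has never executed, so there
+        // is no profiling to build on; plant a ForceOSRExit. Either way, emit
+        // a generic put.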
+ if (!putByIdStatus.isSet())
+ addToGraph(ForceOSRExit);
+ emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
+ return;
+ }
+
+ if (putByIdStatus.numVariants() > 1) {
+ if (!isFTL(m_graph.m_plan.mode) || putByIdStatus.makesCalls()
+ || !Options::enablePolymorphicAccessInlining()) {
+ emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
+ return;
+ }
+
+ if (m_graph.compilation())
+ m_graph.compilation()->noticeInlinedPutById();
+
+ if (!isDirect) {
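+            // A non-direct put may only take a transitioning variant if the
+            // prototype chain is free of setters and read-only properties, so
+            // guard each transitioning variant's chain with structure checks.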
+ for (unsigned variantIndex = putByIdStatus.numVariants(); variantIndex--;) {
+ if (putByIdStatus[variantIndex].kind() != PutByIdVariant::Transition)
+ continue;
+ if (!putByIdStatus[variantIndex].structureChain())
+ continue;
+ emitPrototypeChecks(
+ putByIdStatus[variantIndex].oldStructure(),
+ putByIdStatus[variantIndex].structureChain());
+ }
+ }
+
+ MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
+ data->variants = putByIdStatus.variants();
+ data->identifierNumber = identifierNumber;
+ addToGraph(MultiPutByOffset, OpInfo(data), base, value);
+ return;
+ }
+
+ ASSERT(putByIdStatus.numVariants() == 1);
+ const PutByIdVariant& variant = putByIdStatus[0];
+
+ if (variant.kind() == PutByIdVariant::Replace) {
+ addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structure())), base);
+ handlePutByOffset(base, identifierNumber, variant.offset(), value);
+ if (m_graph.compilation())
+ m_graph.compilation()->noticeInlinedPutById();
+ return;
+ }
+
+ if (variant.kind() != PutByIdVariant::Transition) {
+ emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
+ return;
+ }
+
+ if (variant.structureChain() && !variant.structureChain()->isStillValid()) {
+ emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
+ return;
+ }
+
+ m_graph.chains().addLazily(variant.structureChain());
+
+ addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.oldStructure())), base);
+ if (!isDirect)
+ emitPrototypeChecks(variant.oldStructure(), variant.structureChain());
+
+ ASSERT(variant.oldStructure()->transitionWatchpointSetHasBeenInvalidated());
+
+ Node* propertyStorage;
+ StructureTransitionData* transitionData = m_graph.addStructureTransitionData(
+ StructureTransitionData(variant.oldStructure(), variant.newStructure()));
+
+ if (variant.oldStructure()->outOfLineCapacity()
+ != variant.newStructure()->outOfLineCapacity()) {
+
+ // If we're growing the property storage then it must be because we're
+ // storing into the out-of-line storage.
+ ASSERT(!isInlineOffset(variant.offset()));
+
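+        // No existing out-of-line capacity means there is no butterfly yet, so
+        // allocate a fresh one; otherwise reallocate the existing butterfly,
+        // which requires loading it first.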
+ if (!variant.oldStructure()->outOfLineCapacity()) {
+ propertyStorage = addToGraph(
+ AllocatePropertyStorage, OpInfo(transitionData), base);
+ } else {
+ propertyStorage = addToGraph(
+ ReallocatePropertyStorage, OpInfo(transitionData),
+ base, addToGraph(GetButterfly, base));
+ }
+ } else {
+ if (isInlineOffset(variant.offset()))
+ propertyStorage = base;
+ else
+ propertyStorage = addToGraph(GetButterfly, base);
+ }
+
+ addToGraph(PutStructure, OpInfo(transitionData), base);
+
+ addToGraph(
+ PutByOffset,
+ OpInfo(m_graph.m_storageAccessData.size()),
+ propertyStorage,
+ base,
+ value);
+
+ StorageAccessData storageAccessData;
+ storageAccessData.offset = variant.offset();
+ storageAccessData.identifierNumber = identifierNumber;
+ m_graph.m_storageAccessData.append(storageAccessData);
+
+ if (m_graph.compilation())
+ m_graph.compilation()->noticeInlinedPutById();
+}
+