/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
#include "config.h"
#include "GetByIdStatus.h"
+#include "AccessorCallJITStubRoutine.h"
#include "CodeBlock.h"
+#include "ComplexGetStatus.h"
+#include "JSCInlines.h"
+#include "JSScope.h"
+#include "LLIntData.h"
#include "LowLevelInterpreter.h"
+#include "PolymorphicGetByIdList.h"
+#include <wtf/ListDump.h>
namespace JSC {
-GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, Identifier& ident)
+bool GetByIdStatus::appendVariant(const GetByIdVariant& variant)
+{
+ // Attempt to merge this variant with an already existing variant.
+ for (unsigned i = 0; i < m_variants.size(); ++i) {
+ if (m_variants[i].attemptToMerge(variant))
+ return true;
+ }
+
+ // Make sure there is no overlap. We should have pruned out opportunities for
+ // overlap, but it's possible that an inline cache got into a weird state. Be
+ // defensive and bail if we detect an inconsistent state.
+ for (unsigned i = 0; i < m_variants.size(); ++i) {
+ if (m_variants[i].structureSet().overlaps(variant.structureSet()))
+ return false;
+ }
+
+ m_variants.append(variant);
+ return true;
+}
+
+#if ENABLE(DFG_JIT)
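+// Returns true if the DFG has frequently exited at this bytecode index because a
+// structure check or a constant cache went bad; when that happens, the inline
+// cache data for this access should not be trusted.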
+bool GetByIdStatus::hasExitSite(const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
+{
+ return profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCache))
+ || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadConstantCache));
+}
+#endif
+
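+// Derive a status from the LLInt's inline cache: the bytecode stream stores the
+// last Structure observed at this get_by_id, from which we recover the offset.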
+GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, UniquedStringImpl* uid)
{
UNUSED_PARAM(profiledBlock);
UNUSED_PARAM(bytecodeIndex);
- UNUSED_PARAM(ident);
-#if ENABLE(LLINT)
+ UNUSED_PARAM(uid);
Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
- if (instruction[0].u.opcode == llint_op_method_check)
- instruction++;
+ if (instruction[0].u.opcode == LLInt::getOpcode(op_get_array_length))
+ return GetByIdStatus(NoInformation, false);
Structure* structure = instruction[4].u.structure.get();
if (!structure)
- return GetByIdStatus(NoInformation, StructureSet(), notFound, false);
+ return GetByIdStatus(NoInformation, false);
+
+ if (structure->takesSlowPathInDFGForImpureProperty())
+ return GetByIdStatus(NoInformation, false);
+
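+ // We may be called from a compiler thread while the mutator is running, so use
+ // the concurrency-safe structure lookup.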
+ unsigned attributesIgnored;
+ PropertyOffset offset = structure->getConcurrently(uid, attributesIgnored);
+ if (!isValidOffset(offset))
+ return GetByIdStatus(NoInformation, false);
- size_t offset = structure->get(*profiledBlock->globalData(), ident);
- if (offset == notFound)
- return GetByIdStatus(NoInformation, StructureSet(), notFound, false);
+ return GetByIdStatus(Simple, false, GetByIdVariant(StructureSet(structure), offset));
+}
+
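+// Compute the status for a baseline code block: prefer the JIT's stub info, and
+// fall back to the LLInt cache if the JIT has no information yet.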
+GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, UniquedStringImpl* uid)
+{
+ ConcurrentJITLocker locker(profiledBlock->m_lock);
+
+ GetByIdStatus result;
+
+#if ENABLE(DFG_JIT)
+ result = computeForStubInfo(
+ locker, profiledBlock, map.get(CodeOrigin(bytecodeIndex)), uid,
+ CallLinkStatus::computeExitSiteData(locker, profiledBlock, bytecodeIndex));
- return GetByIdStatus(SimpleDirect, StructureSet(structure), offset, false);
+ if (!result.takesSlowPath()
+ && hasExitSite(locker, profiledBlock, bytecodeIndex))
+ return GetByIdStatus(result.makesCalls() ? MakesCalls : TakesSlowPath, true);
#else
- return GetByIdStatus(NoInformation, StructureSet(), notFound, false);
+ UNUSED_PARAM(map);
#endif
+
+ if (!result)
+ return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
+
+ return result;
}
-GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, unsigned bytecodeIndex, Identifier& ident)
+#if ENABLE(JIT)
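+// Interpret the StructureStubInfo gathered by the JIT. Anything suspicious (an
+// unseen stub, a taken slow path, an uncacheable access) conservatively degrades
+// the status to TakesSlowPath or MakesCalls.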
+GetByIdStatus GetByIdStatus::computeForStubInfo(
+ const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo, UniquedStringImpl* uid,
+ CallLinkStatus::ExitSiteData callExitSiteData)
{
- UNUSED_PARAM(profiledBlock);
- UNUSED_PARAM(bytecodeIndex);
- UNUSED_PARAM(ident);
-#if ENABLE(JIT) && ENABLE(VALUE_PROFILER)
- if (!profiledBlock->numberOfStructureStubInfos())
- return computeFromLLInt(profiledBlock, bytecodeIndex, ident);
+ if (!stubInfo)
+ return GetByIdStatus(NoInformation);
- // First check if it makes either calls, in which case we want to be super careful, or
- // if it's not set at all, in which case we punt.
- StructureStubInfo& stubInfo = profiledBlock->getStubInfo(bytecodeIndex);
- if (!stubInfo.seen)
- return computeFromLLInt(profiledBlock, bytecodeIndex, ident);
+ if (!stubInfo->seen)
+ return GetByIdStatus(NoInformation);
- PolymorphicAccessStructureList* list;
- int listSize;
- switch (stubInfo.accessType) {
- case access_get_by_id_self_list:
- list = stubInfo.u.getByIdSelfList.structureList;
- listSize = stubInfo.u.getByIdSelfList.listSize;
- break;
- case access_get_by_id_proto_list:
- list = stubInfo.u.getByIdProtoList.structureList;
- listSize = stubInfo.u.getByIdProtoList.listSize;
- break;
- default:
- list = 0;
- listSize = 0;
- break;
- }
- for (int i = 0; i < listSize; ++i) {
- if (!list->list[i].isDirect)
- return GetByIdStatus(MakesCalls, StructureSet(), notFound, true);
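+ // If any access on the polymorphic list performs a call (e.g. a getter), then
+ // giving up must be reported as MakesCalls rather than TakesSlowPath.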
+ PolymorphicGetByIdList* list = nullptr;
+ State slowPathState = TakesSlowPath;
+ if (stubInfo->accessType == access_get_by_id_list) {
+ list = stubInfo->u.getByIdList.list;
+ for (unsigned i = 0; i < list->size(); ++i) {
+ const GetByIdAccess& access = list->at(i);
+ if (access.doesCalls())
+ slowPathState = MakesCalls;
+ }
}
- // Next check if it takes slow case, in which case we want to be kind of careful.
- if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex))
- return GetByIdStatus(TakesSlowPath, StructureSet(), notFound, true);
+ if (stubInfo->tookSlowPath)
+ return GetByIdStatus(slowPathState);
// Finally figure out if we can derive an access strategy.
GetByIdStatus result;
- result.m_wasSeenInJIT = true;
- switch (stubInfo.accessType) {
+ result.m_state = Simple;
+ result.m_wasSeenInJIT = true; // This is interesting for bytecode dumping only.
+ switch (stubInfo->accessType) {
case access_unset:
- return computeFromLLInt(profiledBlock, bytecodeIndex, ident);
+ return GetByIdStatus(NoInformation);
case access_get_by_id_self: {
- Structure* structure = stubInfo.u.getByIdSelf.baseObjectStructure.get();
- result.m_offset = structure->get(*profiledBlock->globalData(), ident);
+ Structure* structure = stubInfo->u.getByIdSelf.baseObjectStructure.get();
+ if (structure->takesSlowPathInDFGForImpureProperty())
+ return GetByIdStatus(slowPathState, true);
+ unsigned attributesIgnored;
+ GetByIdVariant variant;
+ variant.m_offset = structure->getConcurrently(uid, attributesIgnored);
+ if (!isValidOffset(variant.m_offset))
+ return GetByIdStatus(slowPathState, true);
- if (result.m_offset != notFound)
- result.m_structureSet.add(structure);
-
- if (result.m_offset != notFound)
- ASSERT(result.m_structureSet.size());
- break;
+ variant.m_structureSet.add(structure);
+ bool didAppend = result.appendVariant(variant);
+ ASSERT_UNUSED(didAppend, didAppend);
+ return result;
}
- case access_get_by_id_self_list: {
- PolymorphicAccessStructureList* list = stubInfo.u.getByIdProtoList.structureList;
- unsigned size = stubInfo.u.getByIdProtoList.listSize;
- for (unsigned i = 0; i < size; ++i) {
- ASSERT(list->list[i].isDirect);
+ case access_get_by_id_list: {
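+ // Walk the polymorphic access list, turning each cacheable access into a
+ // GetByIdVariant. ComplexGetStatus validates the structure chain for us.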
+ for (unsigned listIndex = 0; listIndex < list->size(); ++listIndex) {
+ Structure* structure = list->at(listIndex).structure();
- Structure* structure = list->list[i].base.get();
- if (result.m_structureSet.contains(structure))
+ ComplexGetStatus complexGetStatus = ComplexGetStatus::computeFor(
+ profiledBlock, structure, list->at(listIndex).chain(),
+ list->at(listIndex).chainCount(), uid);
+
+ switch (complexGetStatus.kind()) {
+ case ComplexGetStatus::ShouldSkip:
continue;
-
- size_t myOffset = structure->get(*profiledBlock->globalData(), ident);
-
- if (myOffset == notFound) {
- result.m_offset = notFound;
- break;
- }
-
- if (!i)
- result.m_offset = myOffset;
- else if (result.m_offset != myOffset) {
- result.m_offset = notFound;
+
+ case ComplexGetStatus::TakesSlowPath:
+ return GetByIdStatus(slowPathState, true);
+
+ case ComplexGetStatus::Inlineable: {
+ std::unique_ptr<CallLinkStatus> callLinkStatus;
+ switch (list->at(listIndex).type()) {
+ case GetByIdAccess::SimpleInline:
+ case GetByIdAccess::SimpleStub: {
+ break;
+ }
+ case GetByIdAccess::Getter: {
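+ // A JS getter is inlined in the stub; recover its call profile from the
+ // stub routine's CallLinkInfo so that the caller can reason about the call.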
+ AccessorCallJITStubRoutine* stub = static_cast<AccessorCallJITStubRoutine*>(
+ list->at(listIndex).stubRoutine());
+ callLinkStatus = std::make_unique<CallLinkStatus>(
+ CallLinkStatus::computeFor(
+ locker, profiledBlock, *stub->m_callLinkInfo, callExitSiteData));
+ break;
+ }
+ case GetByIdAccess::SimpleMiss:
+ case GetByIdAccess::CustomGetter:
+ case GetByIdAccess::WatchedStub: {
+ // FIXME: It would be useful to support these access types at some point in the future.
+ // https://bugs.webkit.org/show_bug.cgi?id=133052
+ return GetByIdStatus(slowPathState, true);
+ }
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ GetByIdVariant variant(
+ StructureSet(structure), complexGetStatus.offset(), complexGetStatus.chain(),
+ WTF::move(callLinkStatus));
+
+ if (!result.appendVariant(variant))
+ return GetByIdStatus(slowPathState, true);
break;
- }
-
- result.m_structureSet.add(structure);
+ } }
}
-
- if (result.m_offset != notFound)
- ASSERT(result.m_structureSet.size());
- break;
+
+ return result;
}
default:
- ASSERT(result.m_offset == notFound);
- break;
+ return GetByIdStatus(slowPathState, true);
}
- if (result.m_offset == notFound) {
- result.m_state = TakesSlowPath;
- result.m_structureSet.clear();
- } else
- result.m_state = SimpleDirect;
+ RELEASE_ASSERT_NOT_REACHED();
+ return GetByIdStatus();
+}
+#endif // ENABLE(JIT)
+
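+// Compute the status for an inlined access: consult the DFG code block's stub info
+// first, then fall back to the baseline code block's profiling.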
+GetByIdStatus GetByIdStatus::computeFor(
+ CodeBlock* profiledBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap,
+ StubInfoMap& dfgMap, CodeOrigin codeOrigin, UniquedStringImpl* uid)
+{
+#if ENABLE(DFG_JIT)
+ if (dfgBlock) {
+ CallLinkStatus::ExitSiteData exitSiteData;
+ {
+ ConcurrentJITLocker locker(profiledBlock->m_lock);
+ exitSiteData = CallLinkStatus::computeExitSiteData(
+ locker, profiledBlock, codeOrigin.bytecodeIndex);
+ }
+
+ GetByIdStatus result;
+ {
+ ConcurrentJITLocker locker(dfgBlock->m_lock);
+ result = computeForStubInfo(
+ locker, dfgBlock, dfgMap.get(codeOrigin), uid, exitSiteData);
+ }
+
+ if (result.takesSlowPath())
+ return result;
+
+ {
+ ConcurrentJITLocker locker(profiledBlock->m_lock);
+ if (hasExitSite(locker, profiledBlock, codeOrigin.bytecodeIndex))
+ return GetByIdStatus(TakesSlowPath, true);
+ }
+
+ if (result.isSet())
+ return result;
+ }
+#else
+ UNUSED_PARAM(dfgBlock);
+ UNUSED_PARAM(dfgMap);
+#endif
+
+ return computeFor(profiledBlock, baselineMap, codeOrigin.bytecodeIndex, uid);
+}
+
+GetByIdStatus GetByIdStatus::computeFor(const StructureSet& set, UniquedStringImpl* uid)
+{
+ // For now we only handle the simple self-access case. We could handle the
+ // prototype case in the future.
+
+ if (set.isEmpty())
+ return GetByIdStatus();
+
+ if (parseIndex(*uid))
+ return GetByIdStatus(TakesSlowPath);
+
+ GetByIdStatus result;
+ result.m_state = Simple;
+ result.m_wasSeenInJIT = false;
+ for (unsigned i = 0; i < set.size(); ++i) {
+ Structure* structure = set[i];
+ if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
+ return GetByIdStatus(TakesSlowPath);
+
+ if (!structure->propertyAccessesAreCacheable())
+ return GetByIdStatus(TakesSlowPath);
+
+ unsigned attributes;
+ PropertyOffset offset = structure->getConcurrently(uid, attributes);
+ if (!isValidOffset(offset))
+ return GetByIdStatus(TakesSlowPath); // It's probably a prototype lookup. Give up for now, even though we could be smarter about it.
+ if (attributes & Accessor)
+ return GetByIdStatus(MakesCalls); // We could be smarter here, like strength-reducing this to a Call.
+
+ if (!result.appendVariant(GetByIdVariant(structure, offset)))
+ return GetByIdStatus(TakesSlowPath);
+ }
return result;
-#else // ENABLE(JIT)
- return GetByIdStatus(NoInformation, StructureSet(), notFound, false);
-#endif // ENABLE(JIT)
+}
+
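+// A Simple status makes calls if any variant carries a CallLinkStatus, i.e. it
+// loads through a getter.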
+bool GetByIdStatus::makesCalls() const
+{
+ switch (m_state) {
+ case NoInformation:
+ case TakesSlowPath:
+ return false;
+ case Simple:
+ for (unsigned i = m_variants.size(); i--;) {
+ if (m_variants[i].callLinkStatus())
+ return true;
+ }
+ return false;
+ case MakesCalls:
+ return true;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+
+ return false;
+}
+
+void GetByIdStatus::dump(PrintStream& out) const
+{
+ out.print("(");
+ switch (m_state) {
+ case NoInformation:
+ out.print("NoInformation");
+ break;
+ case Simple:
+ out.print("Simple");
+ break;
+ case TakesSlowPath:
+ out.print("TakesSlowPath");
+ break;
+ case MakesCalls:
+ out.print("MakesCalls");
+ break;
+ }
+ out.print(", ", listDump(m_variants), ", seenInJIT = ", m_wasSeenInJIT, ")");
}
} // namespace JSC