+}
+#endif
+
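+// Computes a combined status for a put_by_id. If a DFG code block is
+// available, we consult its stub info first, since it reflects the most
+// recent executions; if that doesn't produce a simple result, we fall back
+// to the baseline block's profiling.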
+PutByIdStatus PutByIdStatus::computeFor(CodeBlock* baselineBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap, StubInfoMap& dfgMap, CodeOrigin codeOrigin, UniquedStringImpl* uid)
+{
+#if ENABLE(DFG_JIT)
+ if (dfgBlock) {
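+        // First consult the baseline block's exit profile: if we have already
+        // exited on this put_by_id, don't try to be clever.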
+ CallLinkStatus::ExitSiteData exitSiteData;
+ {
+ ConcurrentJITLocker locker(baselineBlock->m_lock);
+ if (hasExitSite(locker, baselineBlock, codeOrigin.bytecodeIndex))
+ return PutByIdStatus(TakesSlowPath);
+ exitSiteData = CallLinkStatus::computeExitSiteData(
+ locker, baselineBlock, codeOrigin.bytecodeIndex);
+ }
+
+ PutByIdStatus result;
+ {
+ ConcurrentJITLocker locker(dfgBlock->m_lock);
+ result = computeForStubInfo(
+ locker, dfgBlock, dfgMap.get(codeOrigin), uid, exitSiteData);
+ }
+
+        // computeForStubInfo() returns TakesSlowPath in some cases where the stub
+        // was simply unset. It would be better if it didn't, but since it does, we
+        // can only trust a Simple result here; for anything else, fall through to
+        // the baseline information below.
+ if (result.isSimple())
+ return result;
+ }
+#else
+ UNUSED_PARAM(dfgBlock);
+ UNUSED_PARAM(dfgMap);
+#endif
+
+ return computeFor(baselineBlock, baselineMap, codeOrigin.bytecodeIndex, uid);
+}
+
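+// Computes a status from a set of structures alone, without consulting any
+// stub info. Each structure must yield either a Replace variant (the property
+// already exists at a valid offset) or a Transition variant (the property is
+// being added and a cached transition already exists); anything else forces
+// the slow path.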
+PutByIdStatus PutByIdStatus::computeFor(JSGlobalObject* globalObject, const StructureSet& set, UniquedStringImpl* uid, bool isDirect)
+{
+ if (parseIndex(*uid))
+ return PutByIdStatus(TakesSlowPath);
+
+ if (set.isEmpty())
+ return PutByIdStatus();
+
+ PutByIdStatus result;
+ result.m_state = Simple;
+ for (unsigned i = 0; i < set.size(); ++i) {
+ Structure* structure = set[i];
+
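+        // Types that intercept property access in C++ can't be handled by the
+        // inline cache; global objects override getOwnPropertySlot() but are
+        // explicitly exempted here.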
+ if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
+ return PutByIdStatus(TakesSlowPath);
+
+ if (!structure->propertyAccessesAreCacheable())
+ return PutByIdStatus(TakesSlowPath);
+
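+        // Look up the property from the concurrent JIT thread. A valid offset
+        // means the property already exists, so this put would be a replace
+        // rather than a transition.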
+ unsigned attributes;
+ PropertyOffset offset = structure->getConcurrently(uid, attributes);
+ if (isValidOffset(offset)) {
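+            // Custom accessors are backed by native functions, so writing to
+            // one makes calls.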
+ if (attributes & CustomAccessor)
+ return PutByIdStatus(MakesCalls);
+
+ if (attributes & (Accessor | ReadOnly))
+ return PutByIdStatus(TakesSlowPath);
+
+ WatchpointSet* replaceSet = structure->propertyReplacementWatchpointSet(offset);
+ if (!replaceSet || replaceSet->isStillValid()) {
+                // When this put executes, it will create, and fire, this replacement
+                // watchpoint set. That means this code has probably never executed, or
+                // something fishy is going on. Also, we cannot create or fire the
+                // watchpoint set from the concurrent JIT thread, so even if we wanted
+                // to handle this, we would have to defer the work somehow. Better to
+                // leave this alone and take the slow path.
+ return PutByIdStatus(TakesSlowPath);
+ }
+
+ if (!result.appendVariant(PutByIdVariant::replace(structure, offset)))
+ return PutByIdStatus(TakesSlowPath);
+ continue;
+ }
+
+ // Our hypothesis is that we're doing a transition. Before we prove that this is really
+ // true, we want to do some sanity checks.
+
+ // Don't cache put transitions on dictionaries.
+ if (structure->isDictionary())
+ return PutByIdStatus(TakesSlowPath);
+
+ // If the structure corresponds to something that isn't an object, then give up, since
+ // we don't want to be adding properties to strings.
+ if (!structure->typeInfo().isObject())
+ return PutByIdStatus(TakesSlowPath);
+
+ RefPtr<IntendedStructureChain> chain;
+ if (!isDirect) {
+ chain = adoptRef(new IntendedStructureChain(globalObject, structure));
+
+ // If the prototype chain has setters or read-only properties, then give up.
+ if (chain->mayInterceptStoreTo(uid))
+ return PutByIdStatus(TakesSlowPath);
+
+            // If the prototype chain hasn't been normalized (i.e. it contains proxies
+            // or dictionaries), give up. The dictionary case can only arise if this
+            // structure has never been used in an optimized put_by_id transition, and
+            // it isn't worth having the optimizing JIT flatten such dictionaries: the
+            // evidence suggests those objects were never used as prototypes in a
+            // cacheable prototype access, so some of the other checks here would
+            // likely fail anyway.
+ if (structure->isProxy() || !chain->isNormalized())
+ return PutByIdStatus(TakesSlowPath);
+ }
+
+ // We only optimize if there is already a structure that the transition is cached to.
+ Structure* transition = Structure::addPropertyTransitionToExistingStructureConcurrently(structure, uid, 0, offset);
+ if (!transition)
+ return PutByIdStatus(TakesSlowPath);
+ ASSERT(isValidOffset(offset));
+
+ bool didAppend = result.appendVariant(
+ PutByIdVariant::transition(structure, transition, chain.get(), offset));
+ if (!didAppend)
+ return PutByIdStatus(TakesSlowPath);
+ }
+
+ return result;
+}
+
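+// Returns true if performing this put may call out to other code: either the
+// status as a whole is MakesCalls, or one of the Simple variants itself makes
+// calls.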
+bool PutByIdStatus::makesCalls() const
+{
+ if (m_state == MakesCalls)
+ return true;
+
+ if (m_state != Simple)
+ return false;
+
+ for (unsigned i = m_variants.size(); i--;) {
+ if (m_variants[i].makesCalls())
+ return true;
+ }
+
+ return false;
+}
+
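+// Dumps a human-readable description of this status to the given PrintStream.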
+void PutByIdStatus::dump(PrintStream& out) const
+{
+ switch (m_state) {
+ case NoInformation:
+ out.print("(NoInformation)");
+ return;
+
+ case Simple:
+ out.print("(", listDump(m_variants), ")");
+ return;
+
+    case TakesSlowPath:
+        out.print("(TakesSlowPath)");
+        return;
+
+ case MakesCalls:
+ out.print("(MakesCalls)");
+ return;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();