avoid triggering gcc's -Wunsafe-loop-optimizations in EDSP
[apt.git] / apt-pkg / acquire-item.cc
index 0569c6ddabdfa0a0266a33d57daef62366695371..8c45acdddb9c2117dc4461849c9124c4353ade7e 100644 (file)
@@ -132,6 +132,49 @@ static std::string GetDiffIndexURI(IndexTarget const &Target)              /*{{{*/
 }
                                                                        /*}}}*/
 
+static void ReportMirrorFailureToCentral(pkgAcquire::Item const &I, std::string const &FailCode, std::string const &Details)/*{{{*/
+{
+   // we only act if a mirror was used at all
+   if(I.UsedMirror.empty())
+      return;
+#if 0
+   std::cerr << "\nReportMirrorFailure: "
+            << I.UsedMirror
+            << " Uri: " << I.DescURI()
+            << " FailCode: "
+            << FailCode << std::endl;
+#endif
+   string const report = _config->Find("Methods::Mirror::ProblemReporting",
+                                "/usr/lib/apt/apt-report-mirror-failure");
+   if(!FileExists(report))
+      return;
+
+   std::vector<char const*> const Args = {
+      report.c_str(),
+      I.UsedMirror.c_str(),
+      I.DescURI().c_str(),
+      FailCode.c_str(),
+      Details.c_str(),
+      NULL
+   };
+
+   pid_t pid = ExecFork();
+   if(pid < 0)
+   {
+      _error->Error("ReportMirrorFailure Fork failed");
+      return;
+   }
+   else if(pid == 0)
+   {
+      execvp(Args[0], (char**)Args.data());
+      std::cerr << "Could not exec " << Args[0] << std::endl;
+      _exit(100);
+   }
+   if(!ExecWait(pid, "report-mirror-failure"))
+      _error->Warning("Couldn't report problem to '%s'", report.c_str());
+}
+                                                                       /*}}}*/
+
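
The hook configured via Methods::Mirror::ProblemReporting receives the mirror, the URI, the fail code and the details as positional arguments, in that order. Below is a minimal sketch of a compatible hook written in C++ for illustration; the hook itself and its log path are assumptions, not part of apt.

    // Hypothetical stand-in for the configured report hook:
    // argv[1] = mirror, argv[2] = URI, argv[3] = fail code, argv[4] = details.
    #include <fstream>
    int main(int argc, char **argv)
    {
       if (argc < 5)
          return 1;
       // log destination is an assumption for illustration only
       std::ofstream log("/var/log/apt/mirror-failure.log", std::ios::app);
       log << argv[1] << ' ' << argv[2] << ' ' << argv[3] << " \"" << argv[4] << "\"\n";
       return log.good() ? 0 : 1;
    }
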
 static bool MessageInsecureRepository(bool const isError, std::string const &msg)/*{{{*/
 {
    if (isError)
@@ -147,14 +190,14 @@ static bool MessageInsecureRepository(bool const isError, std::string const &msg
    _error->Notice("%s", _("See apt-secure(8) manpage for repository creation and user configuration details."));
    return false;
 }
-static bool MessageInsecureRepository(bool const isError, char const * const msg, std::string const &repo)
+static bool APT_NONNULL(2) MessageInsecureRepository(bool const isError, char const * const msg, std::string const &repo)
 {
    std::string m;
    strprintf(m, msg, repo.c_str());
    return MessageInsecureRepository(isError, m);
 }
                                                                        /*}}}*/
-static bool AllowInsecureRepositories(char const * const msg, std::string const &repo,/*{{{*/
+static bool APT_NONNULL(1, 3, 4, 5) AllowInsecureRepositories(char const * const msg, std::string const &repo,/*{{{*/
       metaIndex const * const MetaIndexParser, pkgAcqMetaClearSig * const TransactionManager, pkgAcquire::Item * const I)
 {
    if(MetaIndexParser->GetTrusted() == metaIndex::TRI_YES)
@@ -196,8 +239,7 @@ APT_CONST bool pkgAcqTransactionItem::HashesRequired() const
       we can at least trust them for integrity of the download itself.
       Only repositories without a Release file can (obviously) not have
       hashes – and they are very uncommon and strongly discouraged */
-   return TransactionManager->MetaIndexParser != NULL &&
-      TransactionManager->MetaIndexParser->GetLoadedSuccessfully() == metaIndex::TRI_YES;
+   return TransactionManager->MetaIndexParser->GetLoadedSuccessfully() == metaIndex::TRI_YES;
 }
 HashStringList pkgAcqTransactionItem::GetExpectedHashes() const
 {
@@ -288,13 +330,27 @@ bool pkgAcqTransactionItem::QueueURI(pkgAcquire::ItemDesc &Item)
       return false;
    }
    std::string const FinalFile = GetFinalFilename();
-   if (TransactionManager != NULL && TransactionManager->IMSHit == true &&
-        FileExists(FinalFile) == true)
+   if (TransactionManager->IMSHit == true && FileExists(FinalFile) == true)
    {
       PartialFile = DestFile = FinalFile;
       Status = StatDone;
       return false;
    }
+   // If we got the InRelease file via a mirror, pick all indexes directly from this mirror, too
+   if (TransactionManager->BaseURI.empty() == false &&
+        URI::SiteOnly(Item.URI) != URI::SiteOnly(TransactionManager->BaseURI))
+   {
+      // this ensures we rewrite only once and only the first step
+      auto const OldBaseURI = Target.Option(IndexTarget::BASE_URI);
+      if (OldBaseURI.empty() == false && APT::String::Startswith(Item.URI, OldBaseURI))
+      {
+        auto const ExtraPath = Item.URI.substr(OldBaseURI.length());
+        Item.URI = flCombine(TransactionManager->BaseURI, ExtraPath);
+        UsedMirror = TransactionManager->UsedMirror;
+        if (Item.Description.find(" ") != string::npos)
+           Item.Description.replace(0, Item.Description.find(" "), UsedMirror);
+      }
+   }
    return pkgAcquire::Item::QueueURI(Item);
 }
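
For illustration, the rewrite above maps an index URI from the configured archive onto the mirror that served the Release file. A standalone sketch with assumed URLs, approximating flCombine() with plain concatenation:

    #include <cassert>
    #include <string>
    int main()
    {
       // assumed values: OldBaseURI from the target's BASE_URI option,
       // BaseURI from the mirror that delivered the InRelease file
       std::string const OldBaseURI = "http://deb.debian.org/debian/dists/unstable/";
       std::string const BaseURI = "http://mirror.example.org/debian/dists/unstable/";
       std::string URI = OldBaseURI + "main/binary-amd64/Packages.xz";
       std::string const ExtraPath = URI.substr(OldBaseURI.length());
       URI = BaseURI + ExtraPath;
       assert(URI == "http://mirror.example.org/debian/dists/unstable/main/binary-amd64/Packages.xz");
       return 0;
    }
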
 /* The transition manager InRelease itself (or its older sisters-in-law
@@ -519,6 +575,41 @@ class APT_HIDDEN NoActionItem : public pkgAcquire::Item                    /*{{{*/
    }
 };
                                                                        /*}}}*/
+class APT_HIDDEN CleanupItem : public pkgAcqTransactionItem            /*{{{*/
+/* This class ensures that a file which was configured but isn't downloaded
+   for various reasons isn't kept in an old version in the lists directory.
+   In a way it's the reverse of NoActionItem as it helps with removing files
+   even if the lists-cleanup is deactivated. */
+{
+   public:
+   virtual std::string DescURI() const APT_OVERRIDE {return Target.URI;};
+   virtual HashStringList GetExpectedHashes()  const APT_OVERRIDE {return HashStringList();};
+
+   CleanupItem(pkgAcquire * const Owner, pkgAcqMetaClearSig * const TransactionManager, IndexTarget const &Target) :
+      pkgAcqTransactionItem(Owner, TransactionManager, Target)
+   {
+      Status = StatDone;
+      DestFile = GetFinalFileNameFromURI(Target.URI);
+   }
+   bool TransactionState(TransactionStates const state) APT_OVERRIDE
+   {
+      switch (state)
+      {
+        case TransactionStarted:
+           break;
+        case TransactionAbort:
+           break;
+        case TransactionCommit:
+           if (_config->FindB("Debug::Acquire::Transaction", false) == true)
+              std::clog << "rm " << DestFile << " # " << DescURI() << std::endl;
+           if (RemoveFile("TransItem::TransactionCommit", DestFile) == false)
+              return false;
+           break;
+      }
+      return true;
+   }
+};
+                                                                       /*}}}*/
 
 // Acquire::Item::Item - Constructor                                   /*{{{*/
 APT_IGNORE_DEPRECATED_PUSH
@@ -572,8 +663,6 @@ APT_CONST bool pkgAcquire::Item::IsTrusted() const                  /*{{{*/
    fetch this object */
 void pkgAcquire::Item::Failed(string const &Message,pkgAcquire::MethodConfig const * const Cnf)
 {
-   if(ErrorText.empty())
-      ErrorText = LookupTag(Message,"Message");
    if (QueueCounter <= 1)
    {
       /* This indicates that the file is not available right now but might
@@ -604,16 +693,63 @@ void pkgAcquire::Item::Failed(string const &Message,pkgAcquire::MethodConfig con
    }
 
    string const FailReason = LookupTag(Message, "FailReason");
-   if (FailReason == "MaximumSizeExceeded")
-      RenameOnError(MaximumSizeExceeded);
+   enum { MAXIMUM_SIZE_EXCEEDED, HASHSUM_MISMATCH, OTHER } failreason = OTHER;
+   if (FailReason == "MaximumSizeExceeded")
+      failreason = MAXIMUM_SIZE_EXCEEDED;
    else if (Status == StatAuthError)
-      RenameOnError(HashSumMismatch);
+      failreason = HASHSUM_MISMATCH;
+
+   if(ErrorText.empty())
+   {
+      if (Status == StatAuthError)
+      {
+        std::ostringstream out;
+        switch (failreason)
+        {
+           case HASHSUM_MISMATCH:
+              out << _("Hash Sum mismatch") << std::endl;
+              break;
+           case MAXIMUM_SIZE_EXCEEDED:
+           case OTHER:
+              out << LookupTag(Message, "Message") << std::endl;
+              break;
+        }
+        auto const ExpectedHashes = GetExpectedHashes();
+        if (ExpectedHashes.empty() == false)
+        {
+           out << "Hashes of expected file:" << std::endl;
+           for (auto const &hs: ExpectedHashes)
+              out << " - " << hs.toStr() << std::endl;
+        }
+        if (failreason == HASHSUM_MISMATCH)
+        {
+           out << "Hashes of received file:" << std::endl;
+           for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type)
+           {
+              std::string const tagname = std::string(*type) + "-Hash";
+              std::string const hashsum = LookupTag(Message, tagname.c_str());
+              if (hashsum.empty() == false)
+                 out << " - " << HashString(*type, hashsum).toStr() << std::endl;
+           }
+           out << "Last modification reported: " << LookupTag(Message, "Last-Modified", "<none>") << std::endl;
+        }
+        ErrorText = out.str();
+      }
+      else
+        ErrorText = LookupTag(Message,"Message");
+   }
+
+   switch (failreason)
+   {
+      case MAXIMUM_SIZE_EXCEEDED: RenameOnError(MaximumSizeExceeded); break;
+      case HASHSUM_MISMATCH: RenameOnError(HashSumMismatch); break;
+      case OTHER: break;
+   }
 
-   // report mirror failure back to LP if we actually use a mirror
    if (FailReason.empty() == false)
-      ReportMirrorFailure(FailReason);
+      ReportMirrorFailureToCentral(*this, FailReason, ErrorText);
    else
-      ReportMirrorFailure(ErrorText);
+      ReportMirrorFailureToCentral(*this, ErrorText, ErrorText);
 
    if (QueueCounter > 1)
       Status = StatIdle;
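
With the ErrorText construction added in this hunk, a hash-sum mismatch now produces a message of roughly the following shape (hash values and date are placeholders; one line is emitted per hash type actually available):

    Hash Sum mismatch
    Hashes of expected file:
     - SHA256:<hash listed in the Release file>
    Hashes of received file:
     - SHA256:<hash of the downloaded file>
    Last modification reported: <Last-Modified value from the method, or "<none>">
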
@@ -701,13 +837,10 @@ bool pkgAcquire::Item::RenameOnError(pkgAcquire::Item::RenameOnErrorState const
    {
       case HashSumMismatch:
         errtext = _("Hash Sum mismatch");
-        Status = StatAuthError;
-        ReportMirrorFailure("HashChecksumFailure");
         break;
       case SizeMismatch:
         errtext = _("Size mismatch");
         Status = StatAuthError;
-        ReportMirrorFailure("SizeFailure");
         break;
       case InvalidFormat:
         errtext = _("Invalid file format");
@@ -724,7 +857,6 @@ bool pkgAcquire::Item::RenameOnError(pkgAcquire::Item::RenameOnErrorState const
         break;
       case MaximumSizeExceeded:
         // the method is expected to report a good error for this
-        Status = StatError;
         break;
       case PDiffError:
         // no handling here, done by callers
@@ -742,47 +874,9 @@ void pkgAcquire::Item::SetActiveSubprocess(const std::string &subprocess)/*{{{*/
 }
                                                                        /*}}}*/
 // Acquire::Item::ReportMirrorFailure                                  /*{{{*/
-void pkgAcquire::Item::ReportMirrorFailure(string const &FailCode)
+void pkgAcquire::Item::ReportMirrorFailure(std::string const &FailCode)
 {
-   // we only act if a mirror was used at all
-   if(UsedMirror.empty())
-      return;
-#if 0
-   std::cerr << "\nReportMirrorFailure: " 
-            << UsedMirror
-            << " Uri: " << DescURI()
-            << " FailCode: " 
-            << FailCode << std::endl;
-#endif
-   string report = _config->Find("Methods::Mirror::ProblemReporting", 
-                                "/usr/lib/apt/apt-report-mirror-failure");
-   if(!FileExists(report))
-      return;
-
-   std::vector<char const*> Args;
-   Args.push_back(report.c_str());
-   Args.push_back(UsedMirror.c_str());
-   Args.push_back(DescURI().c_str());
-   Args.push_back(FailCode.c_str());
-   Args.push_back(NULL);
-
-   pid_t pid = ExecFork();
-   if(pid < 0)
-   {
-      _error->Error("ReportMirrorFailure Fork failed");
-      return;
-   }
-   else if(pid == 0)
-   {
-      execvp(Args[0], (char**)Args.data());
-      std::cerr << "Could not exec " << Args[0] << std::endl;
-      _exit(100);
-   }
-   if(!ExecWait(pid, "report-mirror-failure"))
-   {
-      _error->Warning("Couldn't report problem to '%s'",
-                     _config->Find("Methods::Mirror::ProblemReporting").c_str());
-   }
+   ReportMirrorFailureToCentral(*this, FailCode, FailCode);
 }
                                                                        /*}}}*/
 std::string pkgAcquire::Item::HashSum() const                          /*{{{*/
@@ -840,10 +934,8 @@ static void LoadLastMetaIndexParser(pkgAcqMetaClearSig * const TransactionManage
 // AcqMetaBase - Constructor                                           /*{{{*/
 pkgAcqMetaBase::pkgAcqMetaBase(pkgAcquire * const Owner,
       pkgAcqMetaClearSig * const TransactionManager,
-      std::vector<IndexTarget> const &IndexTargets,
       IndexTarget const &DataTarget)
 : pkgAcqTransactionItem(Owner, TransactionManager, DataTarget), d(NULL),
-   IndexTargets(IndexTargets),
    AuthPass(false), IMSHit(false), State(TransactionStarted)
 {
 }
@@ -939,37 +1031,36 @@ void pkgAcqMetaBase::TransactionStageRemoval(pkgAcqTransactionItem * const I,
 }
                                                                        /*}}}*/
 // AcqMetaBase::GenerateAuthWarning - Check gpg authentication error   /*{{{*/
+/* This method is called from ::Failed handlers. If it returns true,
+   no fallback to other files or modes is performed */
 bool pkgAcqMetaBase::CheckStopAuthentication(pkgAcquire::Item * const I, const std::string &Message)
 {
-   // FIXME: this entire function can do now that we disallow going to
-   //        a unauthenticated state and can cleanly rollback
-
    string const Final = I->GetFinalFilename();
-   if(FileExists(Final))
+   std::string const GPGError = LookupTag(Message, "Message");
+   if (FileExists(Final))
    {
       I->Status = StatTransientNetworkError;
-      _error->Warning(_("An error occurred during the signature "
-                        "verification. The repository is not updated "
-                        "and the previous index files will be used. "
-                        "GPG error: %s: %s"),
-                      Desc.Description.c_str(),
-                      LookupTag(Message,"Message").c_str());
+      _error->Warning(_("An error occurred during the signature verification. "
+              "The repository is not updated and the previous index files will be used. "
+              "GPG error: %s: %s"),
+           Desc.Description.c_str(),
+           GPGError.c_str());
       RunScripts("APT::Update::Auth-Failure");
       return true;
    } else if (LookupTag(Message,"Message").find("NODATA") != string::npos) {
       /* Invalid signature file, reject (LP: #346386) (Closes: #627642) */
       _error->Error(_("GPG error: %s: %s"),
-                    Desc.Description.c_str(),
-                    LookupTag(Message,"Message").c_str());
+           Desc.Description.c_str(),
+           GPGError.c_str());
       I->Status = StatAuthError;
       return true;
    } else {
       _error->Warning(_("GPG error: %s: %s"),
-                      Desc.Description.c_str(),
-                      LookupTag(Message,"Message").c_str());
+           Desc.Description.c_str(),
+           GPGError.c_str());
    }
    // gpgv method failed
-   ReportMirrorFailure("GPGFailure");
+   ReportMirrorFailureToCentral(*this, "GPGFailure", GPGError);
    return false;
 }
                                                                        /*}}}*/
@@ -1007,6 +1098,15 @@ bool pkgAcqMetaBase::CheckDownloadDone(pkgAcqTransactionItem * const I, const st
    // We have just finished downloading a Release file (it is not
    // verified yet)
 
+   // Save the final base URI we got this Release file from
+   if (I->UsedMirror.empty() == false && _config->FindB("Acquire::SameMirrorForAllIndexes", true))
+   {
+      if (APT::String::Endswith(I->Desc.URI, "InRelease"))
+        TransactionManager->BaseURI = I->Desc.URI.substr(0, I->Desc.URI.length() - strlen("InRelease"));
+      else if (APT::String::Endswith(I->Desc.URI, "Release"))
+        TransactionManager->BaseURI = I->Desc.URI.substr(0, I->Desc.URI.length() - strlen("Release"));
+   }
+
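+   // e.g. (assumed mirror URL):
+   //   I->Desc.URI = "http://mirror.example.org/debian/dists/unstable/InRelease"
+   //   BaseURI     = "http://mirror.example.org/debian/dists/unstable/"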
    std::string const FileName = LookupTag(Message,"Filename");
    if (FileName != I->DestFile && RealFileExists(I->DestFile) == false)
    {
@@ -1033,8 +1133,7 @@ bool pkgAcqMetaBase::CheckDownloadDone(pkgAcqTransactionItem * const I, const st
    {
       // for simplicity, the transaction manager is always InRelease
       // even if it doesn't exist.
-      if (TransactionManager != NULL)
-        TransactionManager->IMSHit = true;
+      TransactionManager->IMSHit = true;
       I->PartialFile = I->DestFile = I->GetFinalFilename();
    }
 
@@ -1087,91 +1186,115 @@ bool pkgAcqMetaBase::CheckAuthDone(string const &Message)              /*{{{*/
                 << DestFile << std::endl;
 
    // Download further indexes with verification
-   QueueIndexes(true);
+   TransactionManager->QueueIndexes(true);
 
    return true;
 }
                                                                        /*}}}*/
-void pkgAcqMetaBase::QueueIndexes(bool const verify)                   /*{{{*/
+void pkgAcqMetaClearSig::QueueIndexes(bool const verify)                       /*{{{*/
 {
    // at this point the real Items are loaded in the fetcher
    ExpectedAdditionalItems = 0;
 
-  bool metaBaseSupportsByHash = false;
-  if (TransactionManager != NULL && TransactionManager->MetaIndexParser != NULL)
-     metaBaseSupportsByHash = TransactionManager->MetaIndexParser->GetSupportsAcquireByHash();
-
-   for (std::vector <IndexTarget>::iterator Target = IndexTargets.begin();
-        Target != IndexTargets.end();
-        ++Target)
+   std::set<std::string> targetsSeen;
+   bool const metaBaseSupportsByHash = TransactionManager->MetaIndexParser->GetSupportsAcquireByHash();
+   for (auto &Target: TransactionManager->MetaIndexParser->GetIndexTargets())
    {
+      // if we have already seen the target this one is declared a fallback of (matched via
+      // its created-by name), we skip acquiring the fallback (but we make sure we clean up)
+      if (targetsSeen.find(Target.Option(IndexTarget::FALLBACK_OF)) != targetsSeen.end())
+      {
+        targetsSeen.emplace(Target.Option(IndexTarget::CREATED_BY));
+        new CleanupItem(Owner, TransactionManager, Target);
+        continue;
+      }
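+      // targetsSeen collects the created-by names of targets already handled; a later
+      // target whose fallback-of names one of them is only cleaned up, never fetched.
+      // Hypothetical illustration (these field values are made up, not actual apt config):
+      //   primary target : Created-By "Contents"   -> queued, "Contents" enters targetsSeen
+      //   fallback target: Fallback-Of "Contents"  -> found in targetsSeen, so it only gets
+      //                                               a CleanupItem and its own Created-By
+      //                                               is recorded as seen as well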
       // all is an implementation detail. Users shouldn't use this as an arch.
       // We need this support trickery here as e.g. Debian has binary-all files already,
       // but arch:all packages are still in the arch:any files, so we would waste precious
       // download time, bandwidth and diskspace for nothing, BUT Debian doesn't feature all
       // in the set of supported architectures, so we can filter based on this property rather
       // than invent an entirely new flag we would need to carry for all of eternity.
-      if (Target->Option(IndexTarget::ARCHITECTURE) == "all")
+      if (Target.Option(IndexTarget::ARCHITECTURE) == "all")
       {
-        if (TransactionManager->MetaIndexParser->IsArchitectureSupported("all") == false)
-           continue;
-        if (TransactionManager->MetaIndexParser->IsArchitectureAllSupportedFor(*Target) == false)
+        if (TransactionManager->MetaIndexParser->IsArchitectureSupported("all") == false ||
+              TransactionManager->MetaIndexParser->IsArchitectureAllSupportedFor(Target) == false)
+        {
+           new CleanupItem(Owner, TransactionManager, Target);
            continue;
+        }
       }
 
-      bool trypdiff = Target->OptionBool(IndexTarget::PDIFFS);
+      bool trypdiff = Target.OptionBool(IndexTarget::PDIFFS);
       if (verify == true)
       {
-        if (TransactionManager->MetaIndexParser->Exists(Target->MetaKey) == false)
+        if (TransactionManager->MetaIndexParser->Exists(Target.MetaKey) == false)
         {
            // optional targets that we do not have in the Release file are skipped
-           if (Target->IsOptional)
+           if (Target.IsOptional)
+           {
+              new CleanupItem(Owner, TransactionManager, Target);
               continue;
+           }
 
-           std::string const &arch = Target->Option(IndexTarget::ARCHITECTURE);
+           std::string const &arch = Target.Option(IndexTarget::ARCHITECTURE);
            if (arch.empty() == false)
            {
               if (TransactionManager->MetaIndexParser->IsArchitectureSupported(arch) == false)
               {
+                 new CleanupItem(Owner, TransactionManager, Target);
                  _error->Notice(_("Skipping acquire of configured file '%s' as repository '%s' doesn't support architecture '%s'"),
-                       Target->MetaKey.c_str(), TransactionManager->Target.Description.c_str(), arch.c_str());
+                       Target.MetaKey.c_str(), TransactionManager->Target.Description.c_str(), arch.c_str());
                  continue;
               }
               // if the architecture is officially supported but currently no packages for it are available,
               // ignore silently as this is pretty much the same as just shipping an empty file.
               // if we don't know which architectures are supported, we do NOT ignore it to notify user about this
               if (TransactionManager->MetaIndexParser->IsArchitectureSupported("*undefined*") == false)
+              {
+                 new CleanupItem(Owner, TransactionManager, Target);
                  continue;
+              }
            }
 
            Status = StatAuthError;
-           strprintf(ErrorText, _("Unable to find expected entry '%s' in Release file (Wrong sources.list entry or malformed file)"), Target->MetaKey.c_str());
+           strprintf(ErrorText, _("Unable to find expected entry '%s' in Release file (Wrong sources.list entry or malformed file)"), Target.MetaKey.c_str());
            return;
         }
         else
         {
-           auto const hashes = GetExpectedHashesFor(Target->MetaKey);
-           if (hashes.usable() == false && hashes.empty() == false)
+           auto const hashes = GetExpectedHashesFor(Target.MetaKey);
+           if (hashes.empty() == false)
            {
-              _error->Warning(_("Skipping acquire of configured file '%s' as repository '%s' provides only weak security information for it"),
-                       Target->MetaKey.c_str(), TransactionManager->Target.Description.c_str());
-              continue;
+              if (hashes.usable() == false)
+              {
+                 new CleanupItem(Owner, TransactionManager, Target);
+                 _error->Warning(_("Skipping acquire of configured file '%s' as repository '%s' provides only weak security information for it"),
+                       Target.MetaKey.c_str(), TransactionManager->Target.Description.c_str());
+                 continue;
+              }
+              // empty files are skipped as acquiring the very small compressed files is a waste of time
+              else if (hashes.FileSize() == 0)
+              {
+                 new CleanupItem(Owner, TransactionManager, Target);
+                 targetsSeen.emplace(Target.Option(IndexTarget::CREATED_BY));
+                 continue;
+              }
            }
         }
 
         // autoselect the compression method
-        std::vector<std::string> types = VectorizeString(Target->Option(IndexTarget::COMPRESSIONTYPES), ' ');
+        std::vector<std::string> types = VectorizeString(Target.Option(IndexTarget::COMPRESSIONTYPES), ' ');
         types.erase(std::remove_if(types.begin(), types.end(), [&](std::string const &t) {
            if (t == "uncompressed")
-              return TransactionManager->MetaIndexParser->Exists(Target->MetaKey) == false;
-           std::string const MetaKey = Target->MetaKey + "." + t;
+              return TransactionManager->MetaIndexParser->Exists(Target.MetaKey) == false;
+           std::string const MetaKey = Target.MetaKey + "." + t;
            return TransactionManager->MetaIndexParser->Exists(MetaKey) == false;
         }), types.end());
         if (types.empty() == false)
         {
            std::ostringstream os;
            // add the special compressiontype byhash first if supported
-           std::string const useByHashConf = Target->Option(IndexTarget::BY_HASH);
+           std::string const useByHashConf = Target.Option(IndexTarget::BY_HASH);
            bool useByHash = false;
            if(useByHashConf == "force")
               useByHash = true;
@@ -1181,12 +1304,12 @@ void pkgAcqMetaBase::QueueIndexes(bool const verify)                    /*{{{*/
               os << "by-hash ";
            std::copy(types.begin(), types.end()-1, std::ostream_iterator<std::string>(os, " "));
            os << *types.rbegin();
-           Target->Options["COMPRESSIONTYPES"] = os.str();
+           Target.Options["COMPRESSIONTYPES"] = os.str();
         }
         else
-           Target->Options["COMPRESSIONTYPES"].clear();
+           Target.Options["COMPRESSIONTYPES"].clear();
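
The resulting option value is a single space-separated string with "by-hash" prepended when enabled. A standalone sketch of the assembly above, for hypothetical compression types {"xz", "gz", "uncompressed"}:

    #include <algorithm>
    #include <iostream>
    #include <iterator>
    #include <sstream>
    #include <string>
    #include <vector>
    int main()
    {
       std::vector<std::string> const types = {"xz", "gz", "uncompressed"};
       std::ostringstream os;
       os << "by-hash "; // only when by-hash was selected
       std::copy(types.begin(), types.end() - 1, std::ostream_iterator<std::string>(os, " "));
       os << *types.rbegin();
       std::cout << os.str() << std::endl; // prints: by-hash xz gz uncompressed
       return 0;
    }
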
 
-        std::string filename = GetExistingFilename(GetFinalFileNameFromURI(Target->URI));
+        std::string filename = GetExistingFilename(GetFinalFileNameFromURI(Target.URI));
         if (filename.empty() == false)
         {
            // if the Release file is a hit and we have an index it must be the current one
@@ -1197,8 +1320,8 @@ void pkgAcqMetaBase::QueueIndexes(bool const verify)                      /*{{{*/
               // see if the file changed since the last Release file
               // we use the uncompressed files as we might compress differently compared to the server,
               // so the hashes might not match, even if they contain the same data.
-              HashStringList const newFile = GetExpectedHashesFromFor(TransactionManager->MetaIndexParser, Target->MetaKey);
-              HashStringList const oldFile = GetExpectedHashesFromFor(TransactionManager->LastMetaIndexParser, Target->MetaKey);
+              HashStringList const newFile = GetExpectedHashesFromFor(TransactionManager->MetaIndexParser, Target.MetaKey);
+              HashStringList const oldFile = GetExpectedHashesFromFor(TransactionManager->LastMetaIndexParser, Target.MetaKey);
               if (newFile != oldFile)
                  filename.clear();
            }
@@ -1210,57 +1333,42 @@ void pkgAcqMetaBase::QueueIndexes(bool const verify)                    /*{{{*/
 
         if (filename.empty() == false)
         {
-           new NoActionItem(Owner, *Target, filename);
-           std::string const idxfilename = GetFinalFileNameFromURI(GetDiffIndexURI(*Target));
+           new NoActionItem(Owner, Target, filename);
+           std::string const idxfilename = GetFinalFileNameFromURI(GetDiffIndexURI(Target));
            if (FileExists(idxfilename))
-              new NoActionItem(Owner, *Target, idxfilename);
+              new NoActionItem(Owner, Target, idxfilename);
+           targetsSeen.emplace(Target.Option(IndexTarget::CREATED_BY));
            continue;
         }
 
         // check if we have patches available
-        trypdiff &= TransactionManager->MetaIndexParser->Exists(GetDiffIndexFileName(Target->MetaKey));
+        trypdiff &= TransactionManager->MetaIndexParser->Exists(GetDiffIndexFileName(Target.MetaKey));
       }
       else
       {
         // if we have no file to patch, no point in trying
-        trypdiff &= (GetExistingFilename(GetFinalFileNameFromURI(Target->URI)).empty() == false);
+        trypdiff &= (GetExistingFilename(GetFinalFileNameFromURI(Target.URI)).empty() == false);
       }
 
       // no point in patching from local sources
       if (trypdiff)
       {
-        std::string const proto = Target->URI.substr(0, strlen("file:/"));
+        std::string const proto = Target.URI.substr(0, strlen("file:/"));
         if (proto == "file:/" || proto == "copy:/" || proto == "cdrom:")
            trypdiff = false;
       }
 
       // Queue the Index file (Packages, Sources, Translation-$foo, …)
+      targetsSeen.emplace(Target.Option(IndexTarget::CREATED_BY));
       if (trypdiff)
-         new pkgAcqDiffIndex(Owner, TransactionManager, *Target);
+         new pkgAcqDiffIndex(Owner, TransactionManager, Target);
       else
-         new pkgAcqIndex(Owner, TransactionManager, *Target);
+         new pkgAcqIndex(Owner, TransactionManager, Target);
    }
 }
                                                                        /*}}}*/
-bool pkgAcqMetaBase::VerifyVendor(string const &Message)               /*{{{*/
+bool pkgAcqMetaBase::VerifyVendor(string const &)                      /*{{{*/
 {
-   string::size_type pos;
-
-   // check for missing sigs (that where not fatal because otherwise we had
-   // bombed earlier)
-   string missingkeys;
-   string msg = _("There is no public key available for the "
-                 "following key IDs:\n");
-   pos = Message.find("NO_PUBKEY ");
-   if (pos != std::string::npos)
-   {
-      string::size_type start = pos+strlen("NO_PUBKEY ");
-      string Fingerprint = Message.substr(start, Message.find("\n")-start);
-      missingkeys += (Fingerprint);
-   }
-   if(!missingkeys.empty())
-      _error->Warning("%s", (msg + missingkeys).c_str());
-
    string Transformed = TransactionManager->MetaIndexParser->GetExpectedDist();
 
    if (Transformed == "../project/experimental")
@@ -1268,7 +1376,7 @@ bool pkgAcqMetaBase::VerifyVendor(string const &Message)          /*{{{*/
       Transformed = "experimental";
    }
 
-   pos = Transformed.rfind('/');
+   auto pos = Transformed.rfind('/');
    if (pos != string::npos)
    {
       Transformed = Transformed.substr(0, pos);
@@ -1347,15 +1455,14 @@ pkgAcqMetaBase::~pkgAcqMetaBase()
 pkgAcqMetaClearSig::pkgAcqMetaClearSig(pkgAcquire * const Owner,       /*{{{*/
       IndexTarget const &ClearsignedTarget,
       IndexTarget const &DetachedDataTarget, IndexTarget const &DetachedSigTarget,
-      std::vector<IndexTarget> const &IndexTargets,
       metaIndex * const MetaIndexParser) :
-   pkgAcqMetaIndex(Owner, this, ClearsignedTarget, DetachedSigTarget, IndexTargets),
+   pkgAcqMetaIndex(Owner, this, ClearsignedTarget, DetachedSigTarget),
    d(NULL), ClearsignedTarget(ClearsignedTarget),
    DetachedDataTarget(DetachedDataTarget),
    MetaIndexParser(MetaIndexParser), LastMetaIndexParser(NULL)
 {
    // index targets + (worst case:) Release/Release.gpg
-   ExpectedAdditionalItems = IndexTargets.size() + 2;
+   ExpectedAdditionalItems = std::numeric_limits<decltype(ExpectedAdditionalItems)>::max();
    TransactionManager->Add(this);
 }
                                                                        /*}}}*/
@@ -1381,7 +1488,7 @@ void pkgAcqMetaClearSig::Finished()                                       /*{{{*/
 {
    if(_config->FindB("Debug::Acquire::Transaction", false) == true)
       std::clog << "Finished: " << DestFile <<std::endl;
-   if(TransactionManager != NULL && TransactionManager->State == TransactionStarted &&
+   if(TransactionManager->State == TransactionStarted &&
       TransactionManager->TransactionHasError() == false)
       TransactionManager->CommitTransaction();
 }
@@ -1429,9 +1536,6 @@ void pkgAcqMetaClearSig::Failed(string const &Message,pkgAcquire::MethodConfig c
 {
    Item::Failed(Message, Cnf);
 
-   // we failed, we will not get additional items from this method
-   ExpectedAdditionalItems = 0;
-
    if (AuthPass == false)
    {
       if (Status == StatAuthError || Status == StatTransientNetworkError)
@@ -1450,7 +1554,7 @@ void pkgAcqMetaClearSig::Failed(string const &Message,pkgAcquire::MethodConfig c
       TransactionManager->TransactionStageRemoval(this, GetFinalFilename());
       Status = StatDone;
 
-      new pkgAcqMetaIndex(Owner, TransactionManager, DetachedDataTarget, DetachedSigTarget, IndexTargets);
+      new pkgAcqMetaIndex(Owner, TransactionManager, DetachedDataTarget, DetachedSigTarget);
    }
    else
    {
@@ -1479,7 +1583,7 @@ void pkgAcqMetaClearSig::Failed(string const &Message,pkgAcquire::MethodConfig c
         if (TransactionManager->MetaIndexParser->Load(PartialRelease, &ErrorText) == false || VerifyVendor(Message) == false)
            /* expired Release files are still a problem you need extra force for */;
         else
-           QueueIndexes(true);
+           TransactionManager->QueueIndexes(true);
       }
    }
 }
@@ -1488,9 +1592,8 @@ void pkgAcqMetaClearSig::Failed(string const &Message,pkgAcquire::MethodConfig c
 pkgAcqMetaIndex::pkgAcqMetaIndex(pkgAcquire * const Owner,             /*{{{*/
                                  pkgAcqMetaClearSig * const TransactionManager,
                                 IndexTarget const &DataTarget,
-                                IndexTarget const &DetachedSigTarget,
-                                vector<IndexTarget> const &IndexTargets) :
-   pkgAcqMetaBase(Owner, TransactionManager, IndexTargets, DataTarget), d(NULL),
+                                IndexTarget const &DetachedSigTarget) :
+   pkgAcqMetaBase(Owner, TransactionManager, DataTarget), d(NULL),
    DetachedSigTarget(DetachedSigTarget)
 {
    if(_config->FindB("Debug::Acquire::Transaction", false) == true)
@@ -1504,9 +1607,6 @@ pkgAcqMetaIndex::pkgAcqMetaIndex(pkgAcquire * const Owner,                /*{{{*/
    Desc.Owner = this;
    Desc.ShortDesc = DataTarget.ShortDesc;
    Desc.URI = DataTarget.URI;
-
-   // we expect more item
-   ExpectedAdditionalItems = IndexTargets.size();
    QueueURI(Desc);
 }
                                                                        /*}}}*/
@@ -1541,7 +1641,7 @@ void pkgAcqMetaIndex::Failed(string const &Message,
       TransactionManager->TransactionStageRemoval(this, GetFinalFilename());
 
       // queue without any kind of hashsum support
-      QueueIndexes(false);
+      TransactionManager->QueueIndexes(false);
    }
 }
                                                                        /*}}}*/
@@ -1687,7 +1787,7 @@ void pkgAcqMetaSig::Failed(string const &Message,pkgAcquire::MethodConfig const
       if (MetaIndex->VerifyVendor(Message) == false)
         /* expired Release files are still a problem you need extra force for */;
       else
-        MetaIndex->QueueIndexes(GoodLoad);
+        TransactionManager->QueueIndexes(GoodLoad);
 
       TransactionManager->TransactionStageCopy(MetaIndex, MetaIndex->DestFile, MetaIndex->GetFinalFilename());
    }
@@ -1709,6 +1809,21 @@ pkgAcqBaseIndex::pkgAcqBaseIndex(pkgAcquire * const Owner,
       IndexTarget const &Target)
 : pkgAcqTransactionItem(Owner, TransactionManager, Target), d(NULL)
 {
+}
+                                                                       /*}}}*/
+void pkgAcqBaseIndex::Failed(std::string const &Message,pkgAcquire::MethodConfig const * const Cnf)/*{{{*/
+{
+   pkgAcquire::Item::Failed(Message, Cnf);
+   if (Status != StatAuthError)
+      return;
+
+   ErrorText.append("Release file created at: ");
+   auto const timespec = TransactionManager->MetaIndexParser->GetDate();
+   if (timespec == 0)
+      ErrorText.append("<unknown>");
+   else
+      ErrorText.append(TimeRFC1123(timespec));
+   ErrorText.append("\n");
 }
                                                                        /*}}}*/
 pkgAcqBaseIndex::~pkgAcqBaseIndex() {}
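
On an authentication error the item's failure message therefore ends with the creation time of the Release file, which helps with spotting stale mirrors. A sketch of the appended line, assuming TimeRFC1123 renders the date in RFC 1123 style (the exact libapt formatting may differ, and the date value is made up):

    #include <ctime>
    #include <iostream>
    int main()
    {
       std::time_t const timespec = 1488017917; // hypothetical Release "Date:" value
       char buf[64];
       std::strftime(buf, sizeof(buf), "%a, %d %b %Y %H:%M:%S GMT", std::gmtime(&timespec));
       std::cout << "Release file created at: " << buf << std::endl;
       return 0;
    }
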
@@ -1725,6 +1840,9 @@ pkgAcqDiffIndex::pkgAcqDiffIndex(pkgAcquire * const Owner,
                                  IndexTarget const &Target)
    : pkgAcqBaseIndex(Owner, TransactionManager, Target), d(NULL), diffs(NULL)
 {
+   // FIXME: Magic number as an upper bound on pdiffs we will reasonably acquire
+   ExpectedAdditionalItems = 40;
+
    Debug = _config->FindB("Debug::pkgAcquire::Diffs",false);
 
    Desc.Owner = this;
@@ -1769,6 +1887,7 @@ void pkgAcqDiffIndex::QueueOnIMSHit() const                               /*{{{*/
                                                                        /*}}}*/
 bool pkgAcqDiffIndex::ParseDiffIndex(string const &IndexDiffFile)      /*{{{*/
 {
+   ExpectedAdditionalItems = 0;
    // failing here is fine: our caller will take care of trying to
    // get the complete file if patching fails
    if(Debug)
@@ -2045,7 +2164,7 @@ bool pkgAcqDiffIndex::ParseDiffIndex(string const &IndexDiffFile) /*{{{*/
 
    // calculate the size of all patches we have to get
    unsigned short const sizeLimitPercent = _config->FindI("Acquire::PDiffs::SizeLimit", 100);
-   if (sizeLimitPercent > 0 && TransactionManager->MetaIndexParser != nullptr)
+   if (sizeLimitPercent > 0)
    {
       unsigned long long downloadSize = std::accumulate(available_patches.begin(),
            available_patches.end(), 0llu, [](unsigned long long const T, DiffInfo const &I) {
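
Acquire::PDiffs::SizeLimit expresses the patch budget as a percentage of the size of the index being patched: with the default value of 100, accumulating for example 40 MB of patches for a 30 MB index exceeds the budget and apt falls back to downloading the complete file instead, as documented for this option in apt.conf(5).
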
@@ -2150,8 +2269,9 @@ bool pkgAcqDiffIndex::ParseDiffIndex(string const &IndexDiffFile) /*{{{*/
                                                                        /*}}}*/
 void pkgAcqDiffIndex::Failed(string const &Message,pkgAcquire::MethodConfig const * const Cnf)/*{{{*/
 {
-   Item::Failed(Message,Cnf);
+   pkgAcqBaseIndex::Failed(Message,Cnf);
    Status = StatDone;
+   ExpectedAdditionalItems = 0;
 
    if(Debug)
       std::clog << "pkgAcqDiffIndex failed: " << Desc.URI << " with " << Message << std::endl
@@ -2231,7 +2351,7 @@ pkgAcqIndexDiffs::pkgAcqIndexDiffs(pkgAcquire * const Owner,
                                                                        /*}}}*/
 void pkgAcqIndexDiffs::Failed(string const &Message,pkgAcquire::MethodConfig const * const Cnf)/*{{{*/
 {
-   Item::Failed(Message,Cnf);
+   pkgAcqBaseIndex::Failed(Message,Cnf);
    Status = StatDone;
 
    DestFile = GetKeepCompressedFileName(GetPartialFileNameFromURI(Target.URI), Target);
@@ -2436,7 +2556,7 @@ void pkgAcqIndexMergeDiffs::Failed(string const &Message,pkgAcquire::MethodConfi
    if(Debug)
       std::clog << "pkgAcqIndexMergeDiffs failed: " << Desc.URI << " with " << Message << std::endl;
 
-   Item::Failed(Message,Cnf);
+   pkgAcqBaseIndex::Failed(Message,Cnf);
    Status = StatDone;
 
    // check if we are the first to fail, otherwise we are done here
@@ -2611,7 +2731,7 @@ void pkgAcqIndex::Init(string const &URI, string const &URIDesc,
    else if (CurrentCompressionExtension == "by-hash")
    {
       NextCompressionExtension(CurrentCompressionExtension, CompressionExtensions, true);
-      if(unlikely(TransactionManager->MetaIndexParser == NULL || CurrentCompressionExtension.empty()))
+      if(unlikely(CurrentCompressionExtension.empty()))
         return;
       if (CurrentCompressionExtension != "uncompressed")
       {
@@ -2674,7 +2794,7 @@ string pkgAcqIndex::Custom600Headers() const
 // AcqIndex::Failed - getting the indexfile failed                     /*{{{*/
 void pkgAcqIndex::Failed(string const &Message,pkgAcquire::MethodConfig const * const Cnf)
 {
-   Item::Failed(Message,Cnf);
+   pkgAcqBaseIndex::Failed(Message,Cnf);
 
    // authorisation failures will not be fixed by other compression types
    if (Status != StatAuthError)