Merge branch 'debian/sid' into debian/experimental
[apt.git] / apt-pkg / acquire-item.cc
index 6bfd14992ceba8353ba045b53c931d0fbf9a2609..5187738e90b78ca7750bb488f75e34f36971e73e 100644
@@ -65,7 +65,7 @@ static void printHashSumComparision(std::string const &URI, HashStringList const
       std::cerr <<  "\t- " << hs->toStr() << std::endl;
 }
                                                                        /*}}}*/
-static void changeOwnerAndPermissionOfFile(char const * const requester, char const * const file, char const * const user, char const * const group, mode_t const mode)
+static void ChangeOwnerAndPermissionOfFile(char const * const requester, char const * const file, char const * const user, char const * const group, mode_t const mode)
 {
    // ensure the file is owned by root and has good permissions
    struct passwd const * const pw = getpwnam(user);
@@ -78,15 +78,15 @@ static void changeOwnerAndPermissionOfFile(char const * const requester, char co
    if (chmod(file, mode) != 0)
       _error->WarningE(requester, "chmod 0%o of file %s failed", mode, file);
 }
-static std::string preparePartialFile(std::string const &file)
+static std::string GetPartialFileName(std::string const &file)
 {
    std::string DestFile = _config->FindDir("Dir::State::lists") + "partial/";
    DestFile += file;
    return DestFile;
 }
-static std::string preparePartialFileFromURI(std::string const &uri)
+static std::string GetPartialFileNameFromURI(std::string const &uri)
 {
-   return preparePartialFile(URItoFileName(uri));
+   return GetPartialFileName(URItoFileName(uri));
 }
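
A minimal standalone sketch of what the two renamed helpers produce, assuming apt's
stock configuration (Dir::State::lists pointing at /var/lib/apt/lists/); the URI and
the main() wrapper are purely illustrative:

#include <apt-pkg/configuration.h>
#include <apt-pkg/init.h>
#include <apt-pkg/strutl.h>
#include <iostream>
#include <string>

int main()
{
   pkgInitConfig(*_config);                     // load the default apt configuration
   std::string const uri = "http://deb.debian.org/debian/dists/sid/InRelease";
   // mirrors GetPartialFileNameFromURI(): lists dir + "partial/" + mangled URI
   std::string partial = _config->FindDir("Dir::State::lists") + "partial/";
   partial += URItoFileName(uri);
   // typically: /var/lib/apt/lists/partial/deb.debian.org_debian_dists_sid_InRelease
   std::cout << partial << std::endl;
   return 0;
}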
 
 
@@ -125,7 +125,6 @@ pkgAcquire::Item::~Item()
    fetch this object */
 void pkgAcquire::Item::Failed(string Message,pkgAcquire::MethodConfig *Cnf)
 {
-   Status = StatIdle;
    if(ErrorText == "")
       ErrorText = LookupTag(Message,"Message");
    UsedMirror =  LookupTag(Message,"UsedMirror");
@@ -134,7 +133,7 @@ void pkgAcquire::Item::Failed(string Message,pkgAcquire::MethodConfig *Cnf)
       /* This indicates that the file is not available right now but might
          be sometime later. If we do a retry cycle then this should be
         retried [CDROMs] */
-      if (Cnf->LocalOnly == true &&
+      if (Cnf != NULL && Cnf->LocalOnly == true &&
          StringToBool(LookupTag(Message,"Transient-Failure"),false) == true)
       {
         Status = StatIdle;
@@ -143,11 +142,18 @@ void pkgAcquire::Item::Failed(string Message,pkgAcquire::MethodConfig *Cnf)
       }
 
       Status = StatError;
+      Complete = false;
       Dequeue();
-   }   
+   }
+   else
+      Status = StatIdle;
 
-   // report mirror failure back to LP if we actually use a mirror
+   // check fail reason
    string FailReason = LookupTag(Message, "FailReason");
+   if(FailReason == "MaximumSizeExceeded")
+      Rename(DestFile, DestFile+".FAILED");
+
+   // report mirror failure back to LP if we actually use a mirror
    if(FailReason.size() != 0)
       ReportMirrorFailure(FailReason);
    else
@@ -208,10 +214,11 @@ bool pkgAcquire::Item::Rename(string From,string To)
 
 void pkgAcquire::Item::QueueURI(ItemDesc &Item)
 {
-   if (access(DestFile.c_str(), R_OK) == 0)
+   if (RealFileExists(DestFile))
    {
-      changeOwnerAndPermissionOfFile("preparePartialFile", DestFile.c_str(), "_apt", "root", 0600);
-      std::cerr << "QUEUE ITEM: " << DestFile << std::endl;
+      std::string SandboxUser = _config->Find("APT::Sandbox::User");
+      ChangeOwnerAndPermissionOfFile("GetPartialFileName", DestFile.c_str(),
+                                     SandboxUser.c_str(), "root", 0600);
    }
    Owner->Enqueue(Item);
 }
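
For context, a sketch of the ownership handover performed above when a partial file
already exists; HandPartialToSandbox is a hypothetical name, and "_apt" is merely the
usual default of APT::Sandbox::User:

#include <apt-pkg/configuration.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <pwd.h>
#include <grp.h>
#include <string>

// Give an already-present partial file to the sandbox user with mode 0600 so
// the unprivileged fetch methods may resume it (error handling omitted).
static void HandPartialToSandbox(std::string const &file)
{
   std::string const user = _config->Find("APT::Sandbox::User");   // "_apt" unless overridden
   struct passwd const * const pw = getpwnam(user.c_str());
   struct group const * const gr = getgrnam("root");
   if (pw != NULL && gr != NULL)
      (void)chown(file.c_str(), pw->pw_uid, gr->gr_gid);
   (void)chmod(file.c_str(), 0600);
}
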
@@ -331,11 +338,11 @@ pkgAcqDiffIndex::pkgAcqDiffIndex(pkgAcquire *Owner,
 
    RealURI = Target->URI;
    Desc.Owner = this;
-   Desc.Description = Target->Description + "/DiffIndex";
+   Desc.Description = Target->Description + ".diff/Index";
    Desc.ShortDesc = Target->ShortDesc;
    Desc.URI = Target->URI + ".diff/Index";
 
-   DestFile = preparePartialFileFromURI(Desc.URI);
+   DestFile = GetPartialFileNameFromURI(Desc.URI);
 
    if(Debug)
       std::clog << "pkgAcqDiffIndex: " << Desc.URI << std::endl;
@@ -351,9 +358,7 @@ pkgAcqDiffIndex::pkgAcqDiffIndex(pkgAcquire *Owner,
       Desc.URI.substr(0,strlen("file:/")) == "file:/")
    {
       // we don't have a pkg file or we don't want to queue
-      if(Debug)
-        std::clog << "No index file, local or canceld by user" << std::endl;
-      Failed("", NULL);
+      Failed("No index file, local or canceld by user", NULL);
       return;
    }
 
@@ -372,7 +377,7 @@ string pkgAcqDiffIndex::Custom600Headers() const
 {
    string Final = _config->FindDir("Dir::State::lists");
    Final += URItoFileName(Desc.URI);
-   
+
    if(Debug)
       std::clog << "Custom600Header-IMS: " << Final << std::endl;
 
@@ -385,181 +390,287 @@ string pkgAcqDiffIndex::Custom600Headers() const
                                                                        /*}}}*/
 bool pkgAcqDiffIndex::ParseDiffIndex(string IndexDiffFile)             /*{{{*/
 {
+   // failing here is fine: our caller will take care of trying to
+   // get the complete file if patching fails
    if(Debug)
       std::clog << "pkgAcqDiffIndex::ParseIndexDiff() " << IndexDiffFile
         << std::endl;
 
-   pkgTagSection Tags;
-   string ServerSha1;
-   vector<DiffInfo> available_patches;
-   
    FileFd Fd(IndexDiffFile,FileFd::ReadOnly);
    pkgTagFile TF(&Fd);
    if (_error->PendingError() == true)
       return false;
 
-   if(TF.Step(Tags) == true)
+   pkgTagSection Tags;
+   if(unlikely(TF.Step(Tags) == false))
+      return false;
+
+   HashStringList ServerHashes;
+   unsigned long long ServerSize = 0;
+
+   for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type)
    {
-      bool found = false;
-      DiffInfo d;
-      string size;
+      std::string tagname = *type;
+      tagname.append("-Current");
+      std::string const tmp = Tags.FindS(tagname.c_str());
+      if (tmp.empty() == true)
+        continue;
 
-      string const tmp = Tags.FindS("SHA1-Current");
+      string hash;
+      unsigned long long size;
       std::stringstream ss(tmp);
-      ss >> ServerSha1 >> size;
-      unsigned long const ServerSize = atol(size.c_str());
+      ss >> hash >> size;
+      if (unlikely(hash.empty() == true))
+        continue;
+      if (unlikely(ServerSize != 0 && ServerSize != size))
+        continue;
+      ServerHashes.push_back(HashString(*type, hash));
+      ServerSize = size;
+   }
 
-      FileFd fd(CurrentPackagesFile, FileFd::ReadOnly);
-      SHA1Summation SHA1;
-      SHA1.AddFD(fd);
-      string const local_sha1 = SHA1.Result();
+   if (ServerHashes.usable() == false)
+   {
+      if (Debug == true)
+        std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": Did not find a good hashsum in the index" << std::endl;
+      return false;
+   }
 
-      if(local_sha1 == ServerSha1)
+   if (ServerHashes != HashSums())
+   {
+      if (Debug == true)
       {
-        // we have the same sha1 as the server so we are done here
-        if(Debug)
-           std::clog << "Package file is up-to-date" << std::endl;
-         // ensure we have no leftovers from previous runs
-         std::string Partial = _config->FindDir("Dir::State::lists");
-         Partial += "partial/" + URItoFileName(RealURI);
-         unlink(Partial.c_str());
-        // list cleanup needs to know that this file as well as the already
-        // present index is ours, so we create an empty diff to save it for us
-        new pkgAcqIndexDiffs(Owner, TransactionManager, Target, 
-                              ExpectedHashes, MetaIndexParser, 
-                              ServerSha1, available_patches);
-        return true;
+        std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": Index has different hashes than parser, probably older, so fail pdiffing" << std::endl;
+         printHashSumComparision(CurrentPackagesFile, ServerHashes, HashSums());
       }
-      else
+      return false;
+   }
+
+   if (ServerHashes.VerifyFile(CurrentPackagesFile) == true)
+   {
+      // we have the same sha1 as the server so we are done here
+      if(Debug)
+        std::clog << "pkgAcqDiffIndex: Package file " << CurrentPackagesFile << " is up-to-date" << std::endl;
+
+      // list cleanup needs to know that this file as well as the already
+      // present index is ours, so we create an empty diff to save it for us
+      new pkgAcqIndexDiffs(Owner, TransactionManager, Target,
+                           ExpectedHashes, MetaIndexParser);
+      return true;
+   }
+
+   FileFd fd(CurrentPackagesFile, FileFd::ReadOnly);
+   Hashes LocalHashesCalc;
+   LocalHashesCalc.AddFD(fd);
+   HashStringList const LocalHashes = LocalHashesCalc.GetHashStringList();
+
+   if(Debug)
+      std::clog << "Server-Current: " << ServerHashes.find(NULL)->toStr() << " and we start at "
+        << fd.Name() << " " << fd.FileSize() << " " << LocalHashes.find(NULL)->toStr() << std::endl;
+
+   // parse all of (provided) history
+   vector<DiffInfo> available_patches;
+   bool firstAcceptedHashes = true;
+   for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type)
+   {
+      if (LocalHashes.find(*type) == NULL)
+        continue;
+
+      std::string tagname = *type;
+      tagname.append("-History");
+      std::string const tmp = Tags.FindS(tagname.c_str());
+      if (tmp.empty() == true)
+        continue;
+
+      string hash, filename;
+      unsigned long long size;
+      std::stringstream ss(tmp);
+
+      while (ss >> hash >> size >> filename)
       {
-        if(Debug)
-           std::clog << "SHA1-Current: " << ServerSha1 << " and we start at "<< fd.Name() << " " << fd.Size() << " " << local_sha1 << std::endl;
+        if (unlikely(hash.empty() == true || filename.empty() == true))
+           continue;
 
-        // check the historie and see what patches we need
-        string const history = Tags.FindS("SHA1-History");
-        std::stringstream hist(history);
-        while(hist >> d.sha1 >> size >> d.file)
+        // see if we have a record for this file already
+        std::vector<DiffInfo>::iterator cur = available_patches.begin();
+        for (; cur != available_patches.end(); ++cur)
         {
-           // read until the first match is found
-           // from that point on, we probably need all diffs
-           if(d.sha1 == local_sha1) 
-              found=true;
-           else if (found == false)
+           if (cur->file != filename || unlikely(cur->result_size != size))
               continue;
-
-           if(Debug)
-              std::clog << "Need to get diff: " << d.file << std::endl;
-           available_patches.push_back(d);
+           cur->result_hashes.push_back(HashString(*type, hash));
+           break;
         }
-
-        if (available_patches.empty() == false)
+        if (cur != available_patches.end())
+           continue;
+        if (firstAcceptedHashes == true)
+        {
+           DiffInfo next;
+           next.file = filename;
+           next.result_hashes.push_back(HashString(*type, hash));
+           next.result_size = size;
+           next.patch_size = 0;
+           available_patches.push_back(next);
+        }
+        else
         {
-           // patching with too many files is rather slow compared to a fast download
-           unsigned long const fileLimit = _config->FindI("Acquire::PDiffs::FileLimit", 0);
-           if (fileLimit != 0 && fileLimit < available_patches.size())
-           {
-              if (Debug)
-                 std::clog << "Need " << available_patches.size() << " diffs (Limit is " << fileLimit
-                       << ") so fallback to complete download" << std::endl;
-              return false;
-           }
-
-           // see if the patches are too big
-           found = false; // it was true and it will be true again at the end
-           d = *available_patches.begin();
-           string const firstPatch = d.file;
-           unsigned long patchesSize = 0;
-           std::stringstream patches(Tags.FindS("SHA1-Patches"));
-           while(patches >> d.sha1 >> size >> d.file)
-           {
-              if (firstPatch == d.file)
-                 found = true;
-              else if (found == false)
-                 continue;
-
-              patchesSize += atol(size.c_str());
-           }
-           unsigned long const sizeLimit = ServerSize * _config->FindI("Acquire::PDiffs::SizeLimit", 100);
-           if (sizeLimit > 0 && (sizeLimit/100) < patchesSize)
-           {
-              if (Debug)
-                 std::clog << "Need " << patchesSize << " bytes (Limit is " << sizeLimit/100
-                       << ") so fallback to complete download" << std::endl;
-              return false;
-           }
+           if (Debug == true)
+              std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": File " << filename
+                 << " wasn't in the list for the first parsed hash! (history)" << std::endl;
+           break;
         }
       }
+      firstAcceptedHashes = false;
+   }
+
+   if (unlikely(available_patches.empty() == true))
+   {
+      if (Debug)
+        std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": "
+           << "Couldn't find any patches for the patch series." << std::endl;
+      return false;
+   }
+
+   for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type)
+   {
+      if (LocalHashes.find(*type) == NULL)
+        continue;
+
+      std::string tagname = *type;
+      tagname.append("-Patches");
+      std::string const tmp = Tags.FindS(tagname.c_str());
+      if (tmp.empty() == true)
+        continue;
+
+      string hash, filename;
+      unsigned long long size;
+      std::stringstream ss(tmp);
 
-      // we have something, queue the next diff
-      if(found)
+      while (ss >> hash >> size >> filename)
       {
-         // FIXME: make this use the method
-         PackagesFileReadyInPartial = true;
-        std::string const Partial = preparePartialFileFromURI(RealURI);
-
-         FileFd From(CurrentPackagesFile, FileFd::ReadOnly);
-         FileFd To(Partial, FileFd::WriteEmpty);
-         if(CopyFile(From, To) == false)
-            return _error->Errno("CopyFile", "failed to copy");
-
-         if(Debug)
-            std::cerr << "Done copying " << CurrentPackagesFile
-                      << " -> " << Partial
-                      << std::endl;
+        if (unlikely(hash.empty() == true || filename.empty() == true))
+           continue;
 
-        // queue the diffs
-        string::size_type const last_space = Description.rfind(" ");
-        if(last_space != string::npos)
-           Description.erase(last_space, Description.size()-last_space);
-
-        /* decide if we should download patches one by one or in one go:
-           The first is good if the server merges patches, but many don't so client
-           based merging can be attempt in which case the second is better.
-           "bad things" will happen if patches are merged on the server,
-           but client side merging is attempt as well */
-        bool pdiff_merge = _config->FindB("Acquire::PDiffs::Merge", true);
-        if (pdiff_merge == true)
+        // see if we have a record for this file already
+        std::vector<DiffInfo>::iterator cur = available_patches.begin();
+        for (; cur != available_patches.end(); ++cur)
         {
-           // reprepro adds this flag if it has merged patches on the server
-           std::string const precedence = Tags.FindS("X-Patch-Precedence");
-           pdiff_merge = (precedence != "merged");
+           if (cur->file != filename)
+              continue;
+           if (unlikely(cur->patch_size != 0 && cur->patch_size != size))
+              continue;
+           cur->patch_hashes.push_back(HashString(*type, hash));
+           cur->patch_size = size;
+           break;
         }
+        if (cur != available_patches.end())
+              continue;
+        if (Debug == true)
+           std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": File " << filename
+              << " wasn't in the list for the first parsed hash! (patches)" << std::endl;
+        break;
+      }
+   }
 
-        if (pdiff_merge == false)
-         {
-           new pkgAcqIndexDiffs(Owner, TransactionManager, Target, ExpectedHashes, 
-                                 MetaIndexParser,
-                                 ServerSha1, available_patches);
-         }
-         else
-        {
-           std::vector<pkgAcqIndexMergeDiffs*> *diffs = new std::vector<pkgAcqIndexMergeDiffs*>(available_patches.size());
-           for(size_t i = 0; i < available_patches.size(); ++i)
-              (*diffs)[i] = new pkgAcqIndexMergeDiffs(Owner,
-                                                       TransactionManager,
-                                                       Target,
-                                                       ExpectedHashes,
-                                                       MetaIndexParser,
-                                                       available_patches[i],
-                                                       diffs);
-        }
+   bool foundStart = false;
+   for (std::vector<DiffInfo>::iterator cur = available_patches.begin();
+        cur != available_patches.end(); ++cur)
+   {
+      if (LocalHashes != cur->result_hashes)
+        continue;
 
-         Complete = false;
-         Status = StatDone;
-         Dequeue();
-         return true;
-      }
+      available_patches.erase(available_patches.begin(), cur);
+      foundStart = true;
+      break;
+   }
+
+   if (foundStart == false || unlikely(available_patches.empty() == true))
+   {
+      if (Debug)
+        std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": "
+           << "Couldn't find the start of the patch series." << std::endl;
+      return false;
+   }
+
+   // patching with too many files is rather slow compared to a fast download
+   unsigned long const fileLimit = _config->FindI("Acquire::PDiffs::FileLimit", 0);
+   if (fileLimit != 0 && fileLimit < available_patches.size())
+   {
+      if (Debug)
+        std::clog << "Need " << available_patches.size() << " diffs (Limit is " << fileLimit
+           << ") so fallback to complete download" << std::endl;
+      return false;
+   }
+
+   // calculate the size of all patches we have to get
+   // note that all sizes are uncompressed, while we download compressed files
+   unsigned long long patchesSize = 0;
+   for (std::vector<DiffInfo>::const_iterator cur = available_patches.begin();
+        cur != available_patches.end(); ++cur)
+      patchesSize += cur->patch_size;
+   unsigned long long const sizeLimit = ServerSize * _config->FindI("Acquire::PDiffs::SizeLimit", 100);
+   if (sizeLimit > 0 && (sizeLimit/100) < patchesSize)
+   {
+      if (Debug)
+        std::clog << "Need " << patchesSize << " bytes (Limit is " << sizeLimit/100
+           << ") so fallback to complete download" << std::endl;
+      return false;
    }
+
+   // FIXME: make this use the method
+   PackagesFileReadyInPartial = true;
+   std::string const Partial = GetPartialFileNameFromURI(RealURI);
+   
+   FileFd From(CurrentPackagesFile, FileFd::ReadOnly);
+   FileFd To(Partial, FileFd::WriteEmpty);
+   if(CopyFile(From, To) == false)
+      return _error->Errno("CopyFile", "failed to copy");
    
-   // Nothing found, report and return false
-   // Failing here is ok, if we return false later, the full
-   // IndexFile is queued
    if(Debug)
-      std::clog << "Can't find a patch in the index file" << std::endl;
-   return false;
+      std::cerr << "Done copying " << CurrentPackagesFile
+                << " -> " << Partial
+                << std::endl;
+
+   // we have something, queue the diffs
+   string::size_type const last_space = Description.rfind(" ");
+   if(last_space != string::npos)
+      Description.erase(last_space, Description.size()-last_space);
+
+   /* decide if we should download patches one by one or in one go:
+      The first is good if the server merges patches, but many don't, so
+      client-based merging can be attempted, in which case the second is better.
+      "bad things" will happen if patches are merged on the server,
+      but client-side merging is attempted as well */
+   bool pdiff_merge = _config->FindB("Acquire::PDiffs::Merge", true);
+   if (pdiff_merge == true)
+   {
+      // reprepro adds this flag if it has merged patches on the server
+      std::string const precedence = Tags.FindS("X-Patch-Precedence");
+      pdiff_merge = (precedence != "merged");
+   }
+
+   if (pdiff_merge == false)
+   {
+      new pkgAcqIndexDiffs(Owner, TransactionManager, Target, ExpectedHashes, 
+                           MetaIndexParser, available_patches);
+   }
+   else
+   {
+      std::vector<pkgAcqIndexMergeDiffs*> *diffs = new std::vector<pkgAcqIndexMergeDiffs*>(available_patches.size());
+      for(size_t i = 0; i < available_patches.size(); ++i)
+        (*diffs)[i] = new pkgAcqIndexMergeDiffs(Owner, TransactionManager,
+               Target,
+              ExpectedHashes,
+              MetaIndexParser,
+              available_patches[i],
+              diffs);
+   }
+
+   Complete = false;
+   Status = StatDone;
+   Dequeue();
+   return true;
 }
                                                                        /*}}}*/
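
A self-contained sketch of the two fallback limits applied further up in ParseDiffIndex;
UsePDiffs and its parameters are hypothetical, while the option names and defaults mirror
the code above (FileLimit 0 means unlimited, SizeLimit is a percentage of the full index):

#include <apt-pkg/configuration.h>
#include <cstddef>

// Returns true when downloading the patch series still looks cheaper than
// fetching the complete index; patchesSize is the summed, uncompressed size
// of all patches that would have to be applied.
static bool UsePDiffs(std::size_t patchCount, unsigned long long patchesSize,
                      unsigned long long fullIndexSize)
{
   unsigned long const fileLimit = _config->FindI("Acquire::PDiffs::FileLimit", 0);
   if (fileLimit != 0 && fileLimit < patchCount)
      return false;                     // too many patches: applying them is slow
   unsigned long long const sizeLimit =
      fullIndexSize * _config->FindI("Acquire::PDiffs::SizeLimit", 100);
   if (sizeLimit > 0 && (sizeLimit / 100) < patchesSize)
      return false;                     // the patches add up to more than N% of the index
   return true;                         // otherwise queue the patch series
}
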
-void pkgAcqDiffIndex::Failed(string Message,pkgAcquire::MethodConfig * /*Cnf*/)/*{{{*/
+void pkgAcqDiffIndex::Failed(string Message,pkgAcquire::MethodConfig * Cnf)/*{{{*/
 {
    if(Debug)
       std::clog << "pkgAcqDiffIndex failed: " << Desc.URI << " with " << Message << std::endl
@@ -567,9 +678,8 @@ void pkgAcqDiffIndex::Failed(string Message,pkgAcquire::MethodConfig * /*Cnf*/)/
 
    new pkgAcqIndex(Owner, TransactionManager, Target, ExpectedHashes, MetaIndexParser);
 
-   Complete = false;
+   Item::Failed(Message,Cnf);
    Status = StatDone;
-   Dequeue();
 }
                                                                        /*}}}*/
 void pkgAcqDiffIndex::Done(string Message,unsigned long long Size,HashStringList const &Hashes,        /*{{{*/
@@ -595,13 +705,17 @@ void pkgAcqDiffIndex::Done(string Message,unsigned long long Size,HashStringList
 
    }
 
+   string FinalFile;
+   FinalFile = _config->FindDir("Dir::State::lists");
+   FinalFile += URItoFileName(Desc.URI);
+
+   if(StringToBool(LookupTag(Message,"IMS-Hit"),false))
+      DestFile = FinalFile;
+
    if(!ParseDiffIndex(DestFile))
-      return Failed("", NULL);
+      return Failed("Message: Couldn't parse pdiff index", Cnf);
 
    // queue for final move
-   string FinalFile;
-   FinalFile = _config->FindDir("Dir::State::lists")+URItoFileName(RealURI);
-   FinalFile += string(".IndexDiff");
    TransactionManager->TransactionStageCopy(this, DestFile, FinalFile);
 
    Complete = true;
@@ -620,12 +734,11 @@ pkgAcqIndexDiffs::pkgAcqIndexDiffs(pkgAcquire *Owner,
                                    struct IndexTarget const * const Target,
                                    HashStringList const &ExpectedHashes,
                                    indexRecords *MetaIndexParser,
-                                  string ServerSha1,
                                   vector<DiffInfo> diffs)
    : pkgAcqBaseIndex(Owner, TransactionManager, Target, ExpectedHashes, MetaIndexParser),
-     available_patches(diffs), ServerSha1(ServerSha1)
+     available_patches(diffs)
 {
-   DestFile = preparePartialFileFromURI(Target->URI);
+   DestFile = GetPartialFileNameFromURI(Target->URI);
 
    Debug = _config->FindB("Debug::pkgAcquire::Diffs",false);
 
@@ -678,16 +791,9 @@ void pkgAcqIndexDiffs::Finish(bool allDone)
       }
 
       // queue for copy
-      PartialFile = preparePartialFileFromURI(RealURI);
-
-      DestFile = _config->FindDir("Dir::State::lists");
-      DestFile += URItoFileName(RealURI);
-
-      // this happens if we have a up-to-date indexfile
-      if(!FileExists(PartialFile))
-         PartialFile = DestFile;
-
-      TransactionManager->TransactionStageCopy(this, PartialFile, DestFile);
+      std::string FinalFile = _config->FindDir("Dir::State::lists");
+      FinalFile += URItoFileName(RealURI);
+      TransactionManager->TransactionStageCopy(this, DestFile, FinalFile);
 
       // this is for the "real" finish
       Complete = true;
@@ -709,25 +815,31 @@ void pkgAcqIndexDiffs::Finish(bool allDone)
 bool pkgAcqIndexDiffs::QueueNextDiff()                                 /*{{{*/
 {
    // calc sha1 of the just patched file
-   std::string const FinalFile = preparePartialFileFromURI(RealURI);
+   std::string const FinalFile = GetPartialFileNameFromURI(RealURI);
 
    if(!FileExists(FinalFile))
    {
-      Failed("No FinalFile " + FinalFile + " available", NULL);
+      Failed("Message: No FinalFile " + FinalFile + " available", NULL);
       return false;
    }
 
    FileFd fd(FinalFile, FileFd::ReadOnly);
-   SHA1Summation SHA1;
-   SHA1.AddFD(fd);
-   string local_sha1 = string(SHA1.Result());
+   Hashes LocalHashesCalc;
+   LocalHashesCalc.AddFD(fd);
+   HashStringList const LocalHashes = LocalHashesCalc.GetHashStringList();
+
    if(Debug)
-      std::clog << "QueueNextDiff: " 
-               << FinalFile << " (" << local_sha1 << ")"<<std::endl;
+      std::clog << "QueueNextDiff: " << FinalFile << " (" << LocalHashes.find(NULL)->toStr() << ")" << std::endl;
+
+   if (unlikely(LocalHashes.usable() == false || ExpectedHashes.usable() == false))
+   {
+      Failed("Local/Expected hashes are not usable", NULL);
+      return false;
+   }
 
 
    // final file reached before all patches are applied
-   if(local_sha1 == ServerSha1)
+   if(LocalHashes == ExpectedHashes)
    {
       Finish(true);
       return true;
@@ -735,10 +847,10 @@ bool pkgAcqIndexDiffs::QueueNextDiff()                                    /*{{{*/
 
    // remove all patches until the next matching patch is found
    // this requires the Index file to be ordered
-   for(vector<DiffInfo>::iterator I=available_patches.begin();
+   for(vector<DiffInfo>::iterator I = available_patches.begin();
        available_patches.empty() == false &&
          I != available_patches.end() &&
-         I->sha1 != local_sha1;
+         I->result_hashes != LocalHashes;
        ++I)
    {
       available_patches.erase(I);
@@ -747,18 +859,18 @@ bool pkgAcqIndexDiffs::QueueNextDiff()                                    /*{{{*/
    // error checking and falling back if no patch was found
    if(available_patches.empty() == true)
    {
-      Failed("No patches available", NULL);
+      Failed("No patches left to reach target", NULL);
       return false;
    }
 
    // queue the right diff
    Desc.URI = RealURI + ".diff/" + available_patches[0].file + ".gz";
    Desc.Description = Description + " " + available_patches[0].file + string(".pdiff");
-   DestFile = preparePartialFileFromURI(RealURI + ".diff/" + available_patches[0].file);
+   DestFile = GetPartialFileNameFromURI(RealURI + ".diff/" + available_patches[0].file);
 
    if(Debug)
       std::clog << "pkgAcqIndexDiffs::QueueNextDiff(): " << Desc.URI << std::endl;
-   
+
    QueueURI(Desc);
 
    return true;
@@ -773,11 +885,22 @@ void pkgAcqIndexDiffs::Done(string Message,unsigned long long Size, HashStringLi
    Item::Done(Message, Size, Hashes, Cnf);
 
    // FIXME: verify this download too before feeding it to rred
-   std::string const FinalFile = preparePartialFileFromURI(RealURI);
+   std::string const FinalFile = GetPartialFileNameFromURI(RealURI);
 
    // success in downloading a diff, enter ApplyDiff state
    if(State == StateFetchDiff)
    {
+      FileFd fd(DestFile, FileFd::ReadOnly, FileFd::Gzip);
+      class Hashes LocalHashesCalc;
+      LocalHashesCalc.AddFD(fd);
+      HashStringList const LocalHashes = LocalHashesCalc.GetHashStringList();
+
+      if (fd.Size() != available_patches[0].patch_size ||
+           available_patches[0].patch_hashes != LocalHashes)
+      {
+        Failed("Patch has Size/Hashsum mismatch", NULL);
+        return;
+      }
 
       // rred excepts the patch as $FinalFile.ed
       Rename(DestFile,FinalFile+".ed");
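
A sketch of the verification step introduced in this hunk, using hypothetical names; it
assumes, as the code above does, that DiffInfo::patch_size and DiffInfo::patch_hashes
describe the uncompressed patch while the downloaded file itself is gzip-compressed:

#include <apt-pkg/acquire-item.h>
#include <apt-pkg/fileutl.h>
#include <apt-pkg/hashes.h>
#include <string>

// Hash the downloaded patch through the gzip decompressor and compare the
// result against the values parsed from the .diff/Index before handing the
// patch to rred.
static bool PatchLooksGood(std::string const &path, DiffInfo const &patch)
{
   FileFd fd(path, FileFd::ReadOnly, FileFd::Gzip); // reads the uncompressed stream
   Hashes calc;
   calc.AddFD(fd);
   return fd.Size() == patch.patch_size &&
          calc.GetHashStringList() == patch.patch_hashes;
}
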
@@ -814,7 +937,7 @@ void pkgAcqIndexDiffs::Done(string Message,unsigned long long Size, HashStringLi
       if(available_patches.empty() == false) {
         new pkgAcqIndexDiffs(Owner, TransactionManager, Target,
                              ExpectedHashes, MetaIndexParser,
-                              ServerSha1, available_patches);
+                             available_patches);
         return Finish();
       } else 
          // update
@@ -844,7 +967,7 @@ pkgAcqIndexMergeDiffs::pkgAcqIndexMergeDiffs(pkgAcquire *Owner,
    Desc.URI = RealURI + ".diff/" + patch.file + ".gz";
    Desc.Description = Description + " " + patch.file + string(".pdiff");
 
-   DestFile = preparePartialFileFromURI(RealURI + ".diff/" + patch.file);
+   DestFile = GetPartialFileNameFromURI(RealURI + ".diff/" + patch.file);
 
    if(Debug)
       std::clog << "pkgAcqIndexMergeDiffs: " << Desc.URI << std::endl;
@@ -852,13 +975,13 @@ pkgAcqIndexMergeDiffs::pkgAcqIndexMergeDiffs(pkgAcquire *Owner,
    QueueURI(Desc);
 }
                                                                        /*}}}*/
-void pkgAcqIndexMergeDiffs::Failed(string Message,pkgAcquire::MethodConfig * /*Cnf*/)/*{{{*/
+void pkgAcqIndexMergeDiffs::Failed(string Message,pkgAcquire::MethodConfig * Cnf)/*{{{*/
 {
    if(Debug)
       std::clog << "pkgAcqIndexMergeDiffs failed: " << Desc.URI << " with " << Message << std::endl;
-   Complete = false;
+
+   Item::Failed(Message,Cnf);
    Status = StatDone;
-   Dequeue();
 
    // check if we are the first to fail, otherwise we are done here
    State = StateDoneDiff;
@@ -882,10 +1005,21 @@ void pkgAcqIndexMergeDiffs::Done(string Message,unsigned long long Size,HashStri
    Item::Done(Message,Size,Hashes,Cnf);
 
    // FIXME: verify download before feeding it to rred
-   string const FinalFile = preparePartialFileFromURI(RealURI);
+   string const FinalFile = GetPartialFileNameFromURI(RealURI);
 
    if (State == StateFetchDiff)
    {
+      FileFd fd(DestFile, FileFd::ReadOnly, FileFd::Gzip);
+      class Hashes LocalHashesCalc;
+      LocalHashesCalc.AddFD(fd);
+      HashStringList const LocalHashes = LocalHashesCalc.GetHashStringList();
+
+      if (fd.Size() != patch.patch_size || patch.patch_hashes != LocalHashes)
+      {
+        Failed("Patch has Size/Hashsum mismatch", NULL);
+        return;
+      }
+
       // rred expects the patch as $FinalFile.ed.$patchname.gz
       Rename(DestFile, FinalFile + ".ed." + patch.file + ".gz");
 
@@ -938,10 +1072,9 @@ void pkgAcqIndexMergeDiffs::Done(string Message,unsigned long long Size,HashStri
       for (std::vector<pkgAcqIndexMergeDiffs *>::const_iterator I = allPatches->begin();
            I != allPatches->end(); ++I)
       {
-            std::string const PartialFile = preparePartialFileFromURI(RealURI);
-           std::string patch = PartialFile + ".ed." + (*I)->patch.file + ".gz";
-            std::cerr << patch << std::endl;
-           unlink(patch.c_str());
+        std::string const PartialFile = GetPartialFileNameFromURI(RealURI);
+        std::string patch = PartialFile + ".ed." + (*I)->patch.file + ".gz";
+        unlink(patch.c_str());
       }
 
       // all set and done
@@ -1036,7 +1169,7 @@ void pkgAcqIndex::Init(string const &URI, string const &URIDesc,
 {
    Stage = STAGE_DOWNLOAD;
 
-   DestFile = preparePartialFileFromURI(URI);
+   DestFile = GetPartialFileNameFromURI(URI);
 
    CurrentCompressionExtension = CompressionExtensions.substr(0, CompressionExtensions.find(' '));
    if (CurrentCompressionExtension == "uncompressed")
@@ -1153,7 +1286,12 @@ void pkgAcqIndex::ReverifyAfterIMS()
 {
    // update destfile to *not* include the compression extension when doing
    // a reverify (as its uncompressed on disk already)
-   DestFile = preparePartialFileFromURI(RealURI);
+   DestFile = GetPartialFileNameFromURI(RealURI);
+
+   // do not reverify cdrom sources as apt-cdrom may rewrite the Packages
+   // file when its doing the indexcopy
+   if (RealURI.substr(0,6) == "cdrom:")
+      return;
 
    // adjust DestFile if its compressed on disk
    if (_config->FindB("Acquire::GzipIndexes",false) == true)
@@ -1265,11 +1403,6 @@ void pkgAcqIndex::StageDownloadDone(string Message,
    // on if-modfied-since hit to avoid a stale attack against us
    if(StringToBool(LookupTag(Message,"IMS-Hit"),false) == true)
    {
-      // do not reverify cdrom sources as apt-cdrom may rewrite the Packages
-      // file when its doing the indexcopy
-      if (RealURI.substr(0,6) == "cdrom:")
-         return;
-
       // The files timestamp matches, reverify by copy into partial/
       EraseFileName = "";
       ReverifyAfterIMS();
@@ -1279,12 +1412,12 @@ void pkgAcqIndex::StageDownloadDone(string Message,
    // If we have compressed indexes enabled, queue for hash verification
    if (_config->FindB("Acquire::GzipIndexes",false))
    {
-      DestFile = preparePartialFileFromURI(RealURI + '.' + CurrentCompressionExtension);
+      DestFile = GetPartialFileNameFromURI(RealURI + '.' + CurrentCompressionExtension);
       EraseFileName = "";
       Stage = STAGE_DECOMPRESS_AND_VERIFY;
       Desc.URI = "copy:" + FileName;
       QueueURI(Desc);
-
+      SetActiveSubprocess("copy");
       return;
     }
 
@@ -1305,7 +1438,6 @@ void pkgAcqIndex::StageDownloadDone(string Message,
    DestFile += ".decomp";
    Desc.URI = decompProg + ":" + FileName;
    QueueURI(Desc);
-
    SetActiveSubprocess(decompProg);
 }
                                                                        /*}}}*/
@@ -1354,10 +1486,6 @@ pkgAcqIndexTrans::pkgAcqIndexTrans(pkgAcquire *Owner,
                                    indexRecords *MetaIndexParser)
    : pkgAcqIndex(Owner, TransactionManager, Target, ExpectedHashes, MetaIndexParser)
 {
-   // load the filesize
-   indexRecords::checkSum *Record = MetaIndexParser->Lookup(string(Target->MetaKey));
-   if(Record)
-      FileSize = Record->Size;
 }
                                                                        /*}}}*/
 // AcqIndexTrans::Custom600Headers - Insert custom request headers     /*{{{*/
@@ -1383,18 +1511,15 @@ void pkgAcqIndexTrans::Failed(string Message,pkgAcquire::MethodConfig *Cnf)
       return;
    }
 
+   Item::Failed(Message,Cnf);
+
    // FIXME: this is used often (e.g. in pkgAcqIndexTrans) so refactor
    if (Cnf->LocalOnly == true ||
        StringToBool(LookupTag(Message,"Transient-Failure"),false) == false)
    {
       // Ignore this
       Status = StatDone;
-      Complete = false;
-      Dequeue();
-      return;
    }
-
-   Item::Failed(Message,Cnf);
 }
                                                                        /*}}}*/
 // AcqMetaBase::Add - Add a item to the current Transaction            /*{{{*/
@@ -1419,10 +1544,13 @@ void pkgAcqMetaBase::AbortTransaction()
       if ((*I)->Status == pkgAcquire::Item::StatIdle)
          (*I)->Status = pkgAcquire::Item::StatDone;
 
-      // kill files in partial
-      std::string const PartialFile = preparePartialFile(flNotDir((*I)->DestFile));
-      if(FileExists(PartialFile))
-         Rename(PartialFile, PartialFile + ".FAILED");
+      // kill failed files in partial
+      if ((*I)->Status == pkgAcquire::Item::StatError)
+      {
+         std::string const PartialFile = GetPartialFileName(flNotDir((*I)->DestFile));
+         if(FileExists(PartialFile))
+            Rename(PartialFile, PartialFile + ".FAILED");
+      }
    }
 }
                                                                        /*}}}*/
@@ -1456,7 +1584,7 @@ void pkgAcqMetaBase::CommitTransaction()
               << (*I)->DescURI() << std::endl;
 
         Rename((*I)->PartialFile, (*I)->DestFile);
-        changeOwnerAndPermissionOfFile("CommitTransaction", (*I)->DestFile.c_str(), "root", "root", 0644);
+        ChangeOwnerAndPermissionOfFile("CommitTransaction", (*I)->DestFile.c_str(), "root", "root", 0644);
 
       } else {
          if(_config->FindB("Debug::Acquire::Transaction", false) == true)
@@ -1568,14 +1696,8 @@ pkgAcqMetaSig::~pkgAcqMetaSig()                                          /*{{{*/
 // ---------------------------------------------------------------------
 string pkgAcqMetaSig::Custom600Headers() const
 {
-   string FinalFile = _config->FindDir("Dir::State::lists");
-   FinalFile += URItoFileName(RealURI);
-
-   struct stat Buf;
-   if (stat(FinalFile.c_str(),&Buf) != 0)
-      return "\nIndex-File: true";
-
-   return "\nIndex-File: true\nLast-Modified: " + TimeRFC1123(Buf.st_mtime);
+   std::string Header = GetCustom600Headers(RealURI);
+   return Header;
 }
                                                                        /*}}}*/
 // pkgAcqMetaSig::Done - The signature was downloaded/verified         /*{{{*/
@@ -1639,19 +1761,23 @@ void pkgAcqMetaSig::Failed(string Message,pkgAcquire::MethodConfig *Cnf)/*{{{*/
       } else {
          _error->Error("%s", downgrade_msg.c_str());
          Rename(MetaIndexFile, MetaIndexFile+".FAILED");
-         Status = pkgAcquire::Item::StatError;
+        Item::Failed("Message: " + downgrade_msg, Cnf);
          TransactionManager->AbortTransaction();
          return;
       }
    }
+   else
+      _error->Warning(_("The data from '%s' is not signed. Packages "
+              "from that repository can not be authenticated."),
+           URIDesc.c_str());
 
    // this ensures that any file in the lists/ dir is removed by the
    // transaction
-   DestFile = preparePartialFileFromURI(RealURI);
+   DestFile = GetPartialFileNameFromURI(RealURI);
    TransactionManager->TransactionStageRemoval(this, DestFile);
 
    // only allow going further if the users explicitely wants it
-   if(_config->FindB("Acquire::AllowInsecureRepositories") == true)
+   if(MetaIndexParser->IsAlwaysTrusted() || _config->FindB("Acquire::AllowInsecureRepositories") == true)
    {
       // we parse the indexes here because at this point the user wanted
       // a repository that may potentially harm him
@@ -1660,20 +1786,18 @@ void pkgAcqMetaSig::Failed(string Message,pkgAcquire::MethodConfig *Cnf)/*{{{*/
    } 
    else 
    {
-      _error->Warning("Use --allow-insecure-repositories to force the update");
+      _error->Error("Use --allow-insecure-repositories to force the update");
    }
 
+   Item::Failed(Message,Cnf);
+
    // FIXME: this is used often (e.g. in pkgAcqIndexTrans) so refactor
-   if (Cnf->LocalOnly == true || 
+   if (Cnf->LocalOnly == true ||
        StringToBool(LookupTag(Message,"Transient-Failure"),false) == false)
-   {      
+   {
       // Ignore this
       Status = StatDone;
-      Complete = false;
-      Dequeue();
-      return;
    }
-   Item::Failed(Message,Cnf);
 }
                                                                        /*}}}*/
 pkgAcqMetaIndex::pkgAcqMetaIndex(pkgAcquire *Owner,                    /*{{{*/
@@ -1705,7 +1829,7 @@ pkgAcqMetaIndex::pkgAcqMetaIndex(pkgAcquire *Owner,                       /*{{{*/
 // pkgAcqMetaIndex::Init - Delayed constructor                         /*{{{*/
 void pkgAcqMetaIndex::Init(std::string URIDesc, std::string ShortDesc)
 {
-   DestFile = preparePartialFileFromURI(RealURI);
+   DestFile = GetPartialFileNameFromURI(RealURI);
 
    // Create the item
    Desc.Description = URIDesc;
@@ -1722,14 +1846,7 @@ void pkgAcqMetaIndex::Init(std::string URIDesc, std::string ShortDesc)
 // ---------------------------------------------------------------------
 string pkgAcqMetaIndex::Custom600Headers() const
 {
-   string Final = _config->FindDir("Dir::State::lists");
-   Final += URItoFileName(RealURI);
-   
-   struct stat Buf;
-   if (stat(Final.c_str(),&Buf) != 0)
-      return "\nIndex-File: true";
-   
-   return "\nIndex-File: true\nLast-Modified: " + TimeRFC1123(Buf.st_mtime);
+   return GetCustom600Headers(RealURI);
 }
                                                                        /*}}}*/
 void pkgAcqMetaIndex::Done(string Message,unsigned long long Size,     /*{{{*/
@@ -1790,6 +1907,26 @@ bool pkgAcqMetaBase::CheckAuthDone(string Message, const string &RealURI)        /*{{{*
    return true;
 }
                                                                        /*}}}*/
+// pkgAcqMetaBase::GetCustom600Headers - Get header for AcqMetaBase     /*{{{*/
+// ---------------------------------------------------------------------
+string pkgAcqMetaBase::GetCustom600Headers(const string &RealURI) const
+{
+   std::string Header = "\nIndex-File: true";
+   std::string MaximumSize;
+   strprintf(MaximumSize, "\nMaximum-Size: %i",
+             _config->FindI("Acquire::MaxReleaseFileSize", 10*1000*1000));
+   Header += MaximumSize;
+
+   string FinalFile = _config->FindDir("Dir::State::lists");
+   FinalFile += URItoFileName(RealURI);
+
+   struct stat Buf;
+   if (stat(FinalFile.c_str(),&Buf) == 0)
+      Header += "\nLast-Modified: " + TimeRFC1123(Buf.st_mtime);
+
+   return Header;
+}
+                                                                       /*}}}*/
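
Purely illustrative: the sort of header block GetCustom600Headers() assembles for a
Release file that is already present on disk; the size shown is the
Acquire::MaxReleaseFileSize default and the timestamp is invented:

// Example only -- not output from a real run.
std::string const Custom600 =
   "\nIndex-File: true"
   "\nMaximum-Size: 10000000"                         // 10*1000*1000 bytes
   "\nLast-Modified: Thu, 05 Feb 2015 12:00:00 GMT";  // only when FinalFile exists
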
 // pkgAcqMetaBase::QueueForSignatureVerify                             /*{{{*/
 void pkgAcqMetaBase::QueueForSignatureVerify(const std::string &MetaIndexFile,
                                     const std::string &MetaIndexFileSignature)
@@ -2003,9 +2140,12 @@ bool pkgAcqMetaBase::VerifyVendor(string Message, const string &RealURI)/*{{{*/
 }
                                                                        /*}}}*/
 // pkgAcqMetaIndex::Failed - no Release file present                   /*{{{*/
-void pkgAcqMetaIndex::Failed(string /*Message*/,
-                             pkgAcquire::MethodConfig * /*Cnf*/)
+void pkgAcqMetaIndex::Failed(string Message,
+                             pkgAcquire::MethodConfig * Cnf)
 {
+   pkgAcquire::Item::Failed(Message, Cnf);
+   Status = StatDone;
+
    string FinalFile = _config->FindDir("Dir::State::lists") + URItoFileName(RealURI);
 
    _error->Warning(_("The repository '%s' does not have a Release file. "
@@ -2015,7 +2155,7 @@ void pkgAcqMetaIndex::Failed(string /*Message*/,
    // No Release file was present so fall
    // back to queueing Packages files without verification
    // only allow going further if the users explicitely wants it
-   if(_config->FindB("Acquire::AllowInsecureRepositories") == true)
+   if(MetaIndexParser->IsAlwaysTrusted() || _config->FindB("Acquire::AllowInsecureRepositories") == true)
    {
       // Done, queue for rename on transaction finished
       if (FileExists(DestFile)) 
@@ -2025,7 +2165,7 @@ void pkgAcqMetaIndex::Failed(string /*Message*/,
       QueueIndexes(false);
    } else {
       // warn if the repository is unsinged
-      _error->Warning("Use --allow-insecure-repositories to force the update");
+      _error->Error("Use --allow-insecure-repositories to force the update");
       TransactionManager->AbortTransaction();
       Status = StatError;
       return;
@@ -2064,17 +2204,9 @@ pkgAcqMetaClearSig::~pkgAcqMetaClearSig()                                /*{{{*/
 // ---------------------------------------------------------------------
 string pkgAcqMetaClearSig::Custom600Headers() const
 {
-   string Final = _config->FindDir("Dir::State::lists");
-   Final += URItoFileName(RealURI);
-
-   struct stat Buf;
-   if (stat(Final.c_str(),&Buf) != 0)
-   {
-      if (stat(Final.c_str(),&Buf) != 0)
-        return "\nIndex-File: true\nFail-Ignore: true\n";
-   }
-
-   return "\nIndex-File: true\nFail-Ignore: true\nLast-Modified: " + TimeRFC1123(Buf.st_mtime);
+   string Header = GetCustom600Headers(RealURI);
+   Header += "\nFail-Ignore: true";
+   return Header;
 }
                                                                        /*}}}*/
 // pkgAcqMetaClearSig::Done - We got a file                            /*{{{*/
@@ -2115,6 +2247,8 @@ void pkgAcqMetaClearSig::Done(std::string Message,unsigned long long /*Size*/,
                                                                        /*}}}*/
 void pkgAcqMetaClearSig::Failed(string Message,pkgAcquire::MethodConfig *Cnf) /*{{{*/
 {
+   Item::Failed(Message, Cnf);
+
    // we failed, we will not get additional items from this method
    ExpectedAdditionalItems = 0;
 
@@ -2126,14 +2260,12 @@ void pkgAcqMetaClearSig::Failed(string Message,pkgAcquire::MethodConfig *Cnf) /*
       string FinalFile = _config->FindDir("Dir::State::lists");
       FinalFile.append(URItoFileName(RealURI));
       TransactionManager->TransactionStageRemoval(this, FinalFile);
+      Status = StatDone;
 
       new pkgAcqMetaIndex(Owner, TransactionManager,
                        MetaIndexURI, MetaIndexURIDesc, MetaIndexShortDesc,
                        MetaSigURI, MetaSigURIDesc, MetaSigShortDesc,
                        IndexTargets, MetaIndexParser);
-      if (Cnf->LocalOnly == true ||
-         StringToBool(LookupTag(Message, "Transient-Failure"), false) == false)
-        Dequeue();
    }
    else
    {
@@ -2147,11 +2279,13 @@ void pkgAcqMetaClearSig::Failed(string Message,pkgAcquire::MethodConfig *Cnf) /*
       // No Release file was present, or verification failed, so fall
       // back to queueing Packages files without verification
       // only allow going further if the users explicitely wants it
-      if(_config->FindB("Acquire::AllowInsecureRepositories") == true)
+      if(MetaIndexParser->IsAlwaysTrusted() || _config->FindB("Acquire::AllowInsecureRepositories") == true)
       {
+        Status = StatDone;
+
          /* Always move the meta index, even if gpgv failed. This ensures
           * that PackageFile objects are correctly filled in */
-         if (FileExists(DestFile)) 
+         if (FileExists(DestFile))
          {
             string FinalFile = _config->FindDir("Dir::State::lists");
             FinalFile += URItoFileName(RealURI);
@@ -2161,19 +2295,17 @@ void pkgAcqMetaClearSig::Failed(string Message,pkgAcquire::MethodConfig *Cnf) /*
                                       "Release");
             FinalFile = FinalFile.replace(FinalFile.rfind("InRelease"), 9,
                                           "Release");
-            
-            
+
             // Done, queue for rename on transaction finished
             TransactionManager->TransactionStageCopy(this, DestFile, FinalFile);
          }
          QueueIndexes(false);
       } else {
-         // warn if the repository is unsinged
-         _error->Warning("Use --allow-insecure-repositories to force the update");
+         // warn if the repository is unsigned
+         _error->Error("Use --allow-insecure-repositories to force the update");
          TransactionManager->AbortTransaction();
          Status = StatError;
-         return;
-      } 
+      }
    }
 }
                                                                        /*}}}*/
@@ -2354,7 +2486,8 @@ bool pkgAcqArchive::QueueNext()
         else
         {
            PartialSize = Buf.st_size;
-           changeOwnerAndPermissionOfFile("pkgAcqArchive::QueueNext", FinalFile.c_str(), "_apt", "root", 0600);
+            std::string SandboxUser = _config->Find("APT::Sandbox::User");
+           ChangeOwnerAndPermissionOfFile("pkgAcqArchive::QueueNext",DestFile.c_str(), SandboxUser.c_str(), "root", 0600);
         }
       }
 
@@ -2424,7 +2557,7 @@ void pkgAcqArchive::Done(string Message,unsigned long long Size, HashStringList
    string FinalFile = _config->FindDir("Dir::Cache::Archives");
    FinalFile += flNotDir(StoreFilename);
    Rename(DestFile,FinalFile);
-   changeOwnerAndPermissionOfFile("pkgAcqArchive::Done", FinalFile.c_str(), "root", "root", 0644);
+   ChangeOwnerAndPermissionOfFile("pkgAcqArchive::Done", FinalFile.c_str(), "root", "root", 0644);
    StoreFilename = DestFile = FinalFile;
    Complete = true;
 }
@@ -2522,7 +2655,8 @@ pkgAcqFile::pkgAcqFile(pkgAcquire *Owner,string URI, HashStringList const &Hashe
       else
       {
         PartialSize = Buf.st_size;
-        changeOwnerAndPermissionOfFile("pkgAcqFile", DestFile.c_str(), "_apt", "root", 0600);
+         std::string SandboxUser = _config->Find("APT::Sandbox::User");
+        ChangeOwnerAndPermissionOfFile("pkgAcqFile", DestFile.c_str(), SandboxUser.c_str(), "root", 0600);
       }
    }