##################################################################### */
/*}}}*/
// Include Files /*{{{*/
-#ifdef __GNUG__
-#pragma implementation "apt-pkg/acquire-item.h"
-#endif
#include <apt-pkg/acquire-item.h>
#include <apt-pkg/configuration.h>
+#include <apt-pkg/aptconfiguration.h>
#include <apt-pkg/sourcelist.h>
#include <apt-pkg/vendorlist.h>
#include <apt-pkg/error.h>
#include <apt-pkg/strutl.h>
#include <apt-pkg/fileutl.h>
#include <apt-pkg/md5.h>
+#include <apt-pkg/sha1.h>
+#include <apt-pkg/tagfile.h>
#include <apti18n.h>
#include <unistd.h>
#include <errno.h>
#include <string>
+#include <sstream>
#include <stdio.h>
/*}}}*/
// Acquire::Item::Done - Item downloaded OK /*{{{*/
// ---------------------------------------------------------------------
/* */
-void pkgAcquire::Item::Done(string Message,unsigned long Size,string,
+void pkgAcquire::Item::Done(string Message,unsigned long Size,string Hash,
pkgAcquire::MethodConfig *Cnf)
{
// We just downloaded something..
string FileName = LookupTag(Message,"Filename");
- if (Complete == false && FileName == DestFile)
+ // we only inform the Log class if it was actually not a local thing
+ if (Complete == false && !Local && FileName == DestFile)
{
if (Owner->Log != 0)
Owner->Log->Fetched(Size,atoi(LookupTag(Message,"Resume-Point","0").c_str()));
}
}
/*}}}*/
+// AcqDiffIndex::AcqDiffIndex - Constructor /*{{{*/
+// ---------------------------------------------------------------------
+/* Get the DiffIndex file first and see if there are patches available.
+ * If so, create a pkgAcqIndexDiffs fetcher that will get and apply the
+ * patches. If anything goes wrong in that process, it will fall back to
+ * the original Packages file.
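+ *
+ * For an index located at <URI> the DiffIndex is looked up at
+ * <URI>.diff/Index (the <URI> placeholder is only illustrative,
+ * see Desc.URI below).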
+ */
+pkgAcqDiffIndex::pkgAcqDiffIndex(pkgAcquire *Owner,
+ string URI,string URIDesc,string ShortDesc,
+ HashString ExpectedHash)
+ : Item(Owner), RealURI(URI), ExpectedHash(ExpectedHash),
+ Description(URIDesc)
+{
+
+ Debug = _config->FindB("Debug::pkgAcquire::Diffs",false);
+
+ Desc.Description = URIDesc + "/DiffIndex";
+ Desc.Owner = this;
+ Desc.ShortDesc = ShortDesc;
+ Desc.URI = URI + ".diff/Index";
+
+ DestFile = _config->FindDir("Dir::State::lists") + "partial/";
+ DestFile += URItoFileName(URI) + string(".DiffIndex");
+
+ if(Debug)
+ std::clog << "pkgAcqDiffIndex: " << Desc.URI << std::endl;
+
+ // look for the current package file
+ CurrentPackagesFile = _config->FindDir("Dir::State::lists");
+ CurrentPackagesFile += URItoFileName(RealURI);
+
+ // FIXME: this file:/ check is a hack to prevent fetching
+ // from local sources. this is really silly, and
+ // should be fixed cleanly as soon as possible
+ if(!FileExists(CurrentPackagesFile) ||
+ Desc.URI.substr(0,strlen("file:/")) == "file:/")
+ {
+ // we don't have a pkg file or we don't want to queue
+ if(Debug)
+	 std::clog << "No index file, local or cancelled by user" << std::endl;
+ Failed("", NULL);
+ return;
+ }
+
+ if(Debug)
+      std::clog << "pkgAcqDiffIndex::pkgAcqDiffIndex(): "
+ << CurrentPackagesFile << std::endl;
+
+ QueueURI(Desc);
+
+}
+ /*}}}*/
+// AcqDiffIndex::Custom600Headers - Insert custom request headers	/*{{{*/
+// ---------------------------------------------------------------------
+/* The only header we use is the last-modified header. */
+string pkgAcqDiffIndex::Custom600Headers()
+{
+ string Final = _config->FindDir("Dir::State::lists");
+ Final += URItoFileName(RealURI) + string(".IndexDiff");
+
+ if(Debug)
+ std::clog << "Custom600Header-IMS: " << Final << std::endl;
+
+ struct stat Buf;
+ if (stat(Final.c_str(),&Buf) != 0)
+ return "\nIndex-File: true";
+
+ return "\nIndex-File: true\nLast-Modified: " + TimeRFC1123(Buf.st_mtime);
+}
+ /*}}}*/
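+/* The downloaded Index is a tag file; the layout assumed by the parser
+ * below is roughly (hashes, sizes and patch names are illustrative):
+ *
+ *   SHA1-Current: <sha1 of the current server index> <size>
+ *   SHA1-History:
+ *    <sha1 of an older index> <size> <patch name>
+ *    ...
+ *   SHA1-Patches:
+ *    <sha1 of a patch> <patch size> <patch name>
+ *    ...
+ *
+ * The sha1 of the local index is looked up in SHA1-History and every
+ * later patch is queued; SHA1-Patches is only used to add up the size
+ * of the needed patches for the SizeLimit check below. */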
+bool pkgAcqDiffIndex::ParseDiffIndex(string IndexDiffFile) /*{{{*/
+{
+ if(Debug)
+      std::clog << "pkgAcqDiffIndex::ParseDiffIndex() " << IndexDiffFile
+ << std::endl;
+
+ pkgTagSection Tags;
+ string ServerSha1;
+ vector<DiffInfo> available_patches;
+
+ FileFd Fd(IndexDiffFile,FileFd::ReadOnly);
+ pkgTagFile TF(&Fd);
+ if (_error->PendingError() == true)
+ return false;
+
+ if(TF.Step(Tags) == true)
+ {
+ bool found = false;
+ DiffInfo d;
+ string size;
+
+ string const tmp = Tags.FindS("SHA1-Current");
+ std::stringstream ss(tmp);
+ ss >> ServerSha1 >> size;
+ unsigned long const ServerSize = atol(size.c_str());
+
+ FileFd fd(CurrentPackagesFile, FileFd::ReadOnly);
+ SHA1Summation SHA1;
+ SHA1.AddFD(fd.Fd(), fd.Size());
+ string const local_sha1 = SHA1.Result();
+
+ if(local_sha1 == ServerSha1)
+ {
+ // we have the same sha1 as the server
+ if(Debug)
+ std::clog << "Package file is up-to-date" << std::endl;
+ // set found to true, this will queue a pkgAcqIndexDiffs with
+	 // an empty available_patches
+ found = true;
+ }
+ else
+ {
+ if(Debug)
+ std::clog << "SHA1-Current: " << ServerSha1 << std::endl;
+
+	 // check the history and see what patches we need
+ string const history = Tags.FindS("SHA1-History");
+ std::stringstream hist(history);
+ while(hist >> d.sha1 >> size >> d.file)
+ {
+ // read until the first match is found
+ // from that point on, we probably need all diffs
+ if(d.sha1 == local_sha1)
+ found=true;
+ else if (found == false)
+ continue;
+
+ if(Debug)
+ std::clog << "Need to get diff: " << d.file << std::endl;
+ available_patches.push_back(d);
+ }
+
+ if (available_patches.empty() == false)
+ {
+ // patching with too many files is rather slow compared to a fast download
+ unsigned long const fileLimit = _config->FindI("Acquire::PDiffs::FileLimit", 0);
+ if (fileLimit != 0 && fileLimit < available_patches.size())
+ {
+ if (Debug)
+ std::clog << "Need " << available_patches.size() << " diffs (Limit is " << fileLimit
+ << ") so fallback to complete download" << std::endl;
+ return false;
+ }
+
+ // see if the patches are too big
+ found = false; // it was true and it will be true again at the end
+ d = *available_patches.begin();
+ string const firstPatch = d.file;
+ unsigned long patchesSize = 0;
+ std::stringstream patches(Tags.FindS("SHA1-Patches"));
+ while(patches >> d.sha1 >> size >> d.file)
+ {
+ if (firstPatch == d.file)
+ found = true;
+ else if (found == false)
+ continue;
+
+ patchesSize += atol(size.c_str());
+ }
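+	    // Acquire::PDiffs::SizeLimit is a percentage of the size of the
+	    // target file: with the default of 100 we fall back to the full
+	    // download as soon as the patches together would be bigger than it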
+ unsigned long const sizeLimit = ServerSize * _config->FindI("Acquire::PDiffs::SizeLimit", 100);
+ if (sizeLimit > 0 && (sizeLimit/100) < patchesSize)
+ {
+ if (Debug)
+ std::clog << "Need " << patchesSize << " bytes (Limit is " << sizeLimit/100
+ << ") so fallback to complete download" << std::endl;
+ return false;
+ }
+ }
+ }
+
+ // we have something, queue the next diff
+ if(found)
+ {
+ // queue the diffs
+ string::size_type const last_space = Description.rfind(" ");
+ if(last_space != string::npos)
+ Description.erase(last_space, Description.size()-last_space);
+ new pkgAcqIndexDiffs(Owner, RealURI, Description, Desc.ShortDesc,
+ ExpectedHash, ServerSha1, available_patches);
+ Complete = false;
+ Status = StatDone;
+ Dequeue();
+ return true;
+ }
+ }
+
+   // Nothing found, report and return false
+   // Failing here is ok: if we return false, Failed() queues the
+   // full index file instead
+ if(Debug)
+ std::clog << "Can't find a patch in the index file" << std::endl;
+ return false;
+}
+ /*}}}*/
+void pkgAcqDiffIndex::Failed(string Message,pkgAcquire::MethodConfig *Cnf) /*{{{*/
+{
+ if(Debug)
+ std::clog << "pkgAcqDiffIndex failed: " << Desc.URI << std::endl
+		<< "Falling back to normal index file acquire" << std::endl;
+
+ new pkgAcqIndex(Owner, RealURI, Description, Desc.ShortDesc,
+ ExpectedHash);
+
+ Complete = false;
+ Status = StatDone;
+ Dequeue();
+}
+ /*}}}*/
+void pkgAcqDiffIndex::Done(string Message,unsigned long Size,string Md5Hash, /*{{{*/
+ pkgAcquire::MethodConfig *Cnf)
+{
+ if(Debug)
+ std::clog << "pkgAcqDiffIndex::Done(): " << Desc.URI << std::endl;
+
+ Item::Done(Message,Size,Md5Hash,Cnf);
+
+ string FinalFile;
+ FinalFile = _config->FindDir("Dir::State::lists")+URItoFileName(RealURI);
+
+   // success in downloading the index
+ // rename the index
+ FinalFile += string(".IndexDiff");
+ if(Debug)
+ std::clog << "Renaming: " << DestFile << " -> " << FinalFile
+ << std::endl;
+ Rename(DestFile,FinalFile);
+ chmod(FinalFile.c_str(),0644);
+ DestFile = FinalFile;
+
+ if(!ParseDiffIndex(DestFile))
+ return Failed("", NULL);
+
+ Complete = true;
+ Status = StatDone;
+ Dequeue();
+ return;
+}
+ /*}}}*/
+// AcqIndexDiffs::AcqIndexDiffs - Constructor /*{{{*/
+// ---------------------------------------------------------------------
+/* The package diff is added to the queue. One object is constructed
+ * for each diff and the index
+ */
+pkgAcqIndexDiffs::pkgAcqIndexDiffs(pkgAcquire *Owner,
+ string URI,string URIDesc,string ShortDesc,
+ HashString ExpectedHash,
+ string ServerSha1,
+ vector<DiffInfo> diffs)
+ : Item(Owner), RealURI(URI), ExpectedHash(ExpectedHash),
+ available_patches(diffs), ServerSha1(ServerSha1)
+{
+
+ DestFile = _config->FindDir("Dir::State::lists") + "partial/";
+ DestFile += URItoFileName(URI);
+
+ Debug = _config->FindB("Debug::pkgAcquire::Diffs",false);
+
+ Description = URIDesc;
+ Desc.Owner = this;
+ Desc.ShortDesc = ShortDesc;
+
+ if(available_patches.size() == 0)
+ {
+ // we are done (yeah!)
+ Finish(true);
+ }
+ else
+ {
+ // get the next diff
+ State = StateFetchDiff;
+ QueueNextDiff();
+ }
+}
+ /*}}}*/
+void pkgAcqIndexDiffs::Failed(string Message,pkgAcquire::MethodConfig *Cnf) /*{{{*/
+{
+ if(Debug)
+ std::clog << "pkgAcqIndexDiffs failed: " << Desc.URI << std::endl
+		<< "Falling back to normal index file acquire" << std::endl;
+ new pkgAcqIndex(Owner, RealURI, Description,Desc.ShortDesc,
+ ExpectedHash);
+ Finish();
+}
+ /*}}}*/
+// Finish - helper that cleans the item out of the fetcher queue /*{{{*/
+void pkgAcqIndexDiffs::Finish(bool allDone)
+{
+   // we restore the original name; this is required, otherwise
+   // the file will be cleaned up
+ if(allDone)
+ {
+ DestFile = _config->FindDir("Dir::State::lists");
+ DestFile += URItoFileName(RealURI);
+
+ if(!ExpectedHash.empty() && !ExpectedHash.VerifyFile(DestFile))
+ {
+ Status = StatAuthError;
+	 ErrorText = _("Hash Sum mismatch");
+ Rename(DestFile,DestFile + ".FAILED");
+ Dequeue();
+ return;
+ }
+
+ // this is for the "real" finish
+ Complete = true;
+ Status = StatDone;
+ Dequeue();
+ if(Debug)
+ std::clog << "\n\nallDone: " << DestFile << "\n" << std::endl;
+ return;
+ }
+
+ if(Debug)
+ std::clog << "Finishing: " << Desc.URI << std::endl;
+ Complete = false;
+ Status = StatDone;
+ Dequeue();
+ return;
+}
+ /*}}}*/
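+/* Compare the sha1 of the current (possibly already patched) index file
+ * with the sha1 announced by the server: if they match we are done,
+ * otherwise drop the leading entries of available_patches until the one
+ * matching the local file is found and queue that diff for download. */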
+bool pkgAcqIndexDiffs::QueueNextDiff() /*{{{*/
+{
+
+ // calc sha1 of the just patched file
+ string FinalFile = _config->FindDir("Dir::State::lists");
+ FinalFile += URItoFileName(RealURI);
+
+ FileFd fd(FinalFile, FileFd::ReadOnly);
+ SHA1Summation SHA1;
+ SHA1.AddFD(fd.Fd(), fd.Size());
+ string local_sha1 = string(SHA1.Result());
+ if(Debug)
+ std::clog << "QueueNextDiff: "
+ << FinalFile << " (" << local_sha1 << ")"<<std::endl;
+
+ // final file reached before all patches are applied
+ if(local_sha1 == ServerSha1)
+ {
+ Finish(true);
+ return true;
+ }
+
+   // remove all patches until the next matching patch is found
+   // this requires the Index file to be ordered
+   // (erase() invalidates the iterator, so always drop the front entry)
+   while(available_patches.empty() == false &&
+         available_patches.begin()->sha1 != local_sha1)
+      available_patches.erase(available_patches.begin());
+
+ // error checking and falling back if no patch was found
+ if(available_patches.size() == 0)
+ {
+ Failed("", NULL);
+ return false;
+ }
+
+ // queue the right diff
+ Desc.URI = string(RealURI) + ".diff/" + available_patches[0].file + ".gz";
+ Desc.Description = Description + " " + available_patches[0].file + string(".pdiff");
+ DestFile = _config->FindDir("Dir::State::lists") + "partial/";
+ DestFile += URItoFileName(RealURI + ".diff/" + available_patches[0].file);
+
+ if(Debug)
+ std::clog << "pkgAcqIndexDiffs::QueueNextDiff(): " << Desc.URI << std::endl;
+
+ QueueURI(Desc);
+
+ return true;
+}
+ /*}}}*/
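+/* A fetched diff goes through three states before the next one can be
+ * queued (the gzip and rred acquire methods do the actual work):
+ *   StateFetchDiff - the compressed patch was downloaded; requeue it
+ *                    with a "gzip:" URI so it gets decompressed
+ *   StateUnzipDiff - the uncompressed patch is available; rename it to
+ *                    <index>.ed and requeue it with a "rred:" URI
+ *   StateApplyDiff - rred applied the patch; move the result in place
+ *                    and queue the next diff or Finish() */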
+void pkgAcqIndexDiffs::Done(string Message,unsigned long Size,string Md5Hash, /*{{{*/
+ pkgAcquire::MethodConfig *Cnf)
+{
+ if(Debug)
+ std::clog << "pkgAcqIndexDiffs::Done(): " << Desc.URI << std::endl;
+
+ Item::Done(Message,Size,Md5Hash,Cnf);
+
+ string FinalFile;
+ FinalFile = _config->FindDir("Dir::State::lists")+URItoFileName(RealURI);
+
+   // success in downloading a diff, enter unzip state
+ if(State == StateFetchDiff)
+ {
+
+ if(Debug)
+ std::clog << "Sending to gzip method: " << FinalFile << std::endl;
+
+ string FileName = LookupTag(Message,"Filename");
+ State = StateUnzipDiff;
+ Local = true;
+ Desc.URI = "gzip:" + FileName;
+ DestFile += ".decomp";
+ QueueURI(Desc);
+ Mode = "gzip";
+ return;
+ }
+
+   // success in unzipping a diff, enter ApplyDiff state
+ if(State == StateUnzipDiff)
+ {
+
+      // rred expects the patch as $FinalFile.ed
+ Rename(DestFile,FinalFile+".ed");
+ if(Debug)
+ std::clog << "Sending to rred method: " << FinalFile << std::endl;
+
+ State = StateApplyDiff;
+ Local = true;
+ Desc.URI = "rred:" + FinalFile;
+ QueueURI(Desc);
+ Mode = "rred";
+ return;
+ }
+
+
+   // success in downloading and applying a diff, queue the next one (if needed)
+ if(State == StateApplyDiff)
+ {
+ // remove the just applied patch
+ available_patches.erase(available_patches.begin());
+
+ // move into place
+ if(Debug)
+ {
+ std::clog << "Moving patched file in place: " << std::endl
+ << DestFile << " -> " << FinalFile << std::endl;
+ }
+ Rename(DestFile,FinalFile);
+ chmod(FinalFile.c_str(),0644);
+
+ // see if there is more to download
+ if(available_patches.size() > 0) {
+ new pkgAcqIndexDiffs(Owner, RealURI, Description, Desc.ShortDesc,
+ ExpectedHash, ServerSha1, available_patches);
+ return Finish();
+ } else
+ return Finish(true);
+ }
+}
+ /*}}}*/
// AcqIndex::AcqIndex - Constructor /*{{{*/
// ---------------------------------------------------------------------
/* The package file is added to the queue and a second class is
instantiated to fetch the revision file */
pkgAcqIndex::pkgAcqIndex(pkgAcquire *Owner,
string URI,string URIDesc,string ShortDesc,
- string ExpectedMD5, string comprExt) :
- Item(Owner), RealURI(URI), ExpectedMD5(ExpectedMD5)
+ HashString ExpectedHash, string comprExt)
+ : Item(Owner), RealURI(URI), ExpectedHash(ExpectedHash)
{
Decompression = false;
Erase = false;
if(comprExt.empty())
{
// autoselect the compression method
- if(FileExists("/bin/bzip2"))
- CompressionExtension = ".bz2";
- else
- CompressionExtension = ".gz";
- } else {
- CompressionExtension = comprExt;
+ std::vector<std::string> types = APT::Configuration::getCompressionTypes();
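+      // (getCompressionTypes() returns the configured types in
+      //  preference order, so we start with the first one here;
+      //  Failed() below retries with the next type on errors)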
+ if (types.empty() == true)
+ comprExt = "plain";
+ else
+ comprExt = "." + types[0];
}
- Desc.URI = URI + CompressionExtension;
+ CompressionExtension = ((comprExt == "plain" || comprExt == ".") ? "" : comprExt);
+
+ Desc.URI = URI + CompressionExtension;
Desc.Description = URIDesc;
Desc.Owner = this;
return "\nIndex-File: true\nLast-Modified: " + TimeRFC1123(Buf.st_mtime);
}
/*}}}*/
-
-void pkgAcqIndex::Failed(string Message,pkgAcquire::MethodConfig *Cnf)
+void pkgAcqIndex::Failed(string Message,pkgAcquire::MethodConfig *Cnf) /*{{{*/
{
- // no .bz2 found, retry with .gz
- if(Desc.URI.substr(Desc.URI.size()-3) == "bz2") {
- Desc.URI = Desc.URI.substr(0,Desc.URI.size()-3) + "gz";
+ std::vector<std::string> types = APT::Configuration::getCompressionTypes();
+
+ for (std::vector<std::string>::const_iterator t = types.begin();
+ t != types.end(); t++)
+ {
+ // jump over all already tried compression types
+ const unsigned int nameLen = Desc.URI.size() - (*t).size();
+ if(Desc.URI.substr(nameLen) != *t)
+ continue;
- // retry with a gzip one
- new pkgAcqIndex(Owner, RealURI, Desc.Description,Desc.ShortDesc,
- ExpectedMD5, string(".gz"));
+ // we want to try it with the next extension (and make sure to
+ // not skip over the end)
+ t++;
+ if (t == types.end())
+ break;
+
+ // queue new download
+ Desc.URI = Desc.URI.substr(0, nameLen) + *t;
+ new pkgAcqIndex(Owner, RealURI, Desc.Description, Desc.ShortDesc,
+ ExpectedHash, string(".").append(*t));
+
Status = StatDone;
Complete = false;
Dequeue();
return;
}
-
+ // on decompression failure, remove bad versions in partial/
+ if(Decompression && Erase) {
+ string s = _config->FindDir("Dir::State::lists") + "partial/";
+ s += URItoFileName(RealURI);
+ unlink(s.c_str());
+ }
+
Item::Failed(Message,Cnf);
}
-
-
+ /*}}}*/
// AcqIndex::Done - Finished a fetch /*{{{*/
// ---------------------------------------------------------------------
/* This goes through a number of states.. On the initial fetch the
to the uncompressed version of the file. If this is so the file
is copied into the partial directory. In all other cases the file
is decompressed with a gzip uri. */
-void pkgAcqIndex::Done(string Message,unsigned long Size,string MD5,
+void pkgAcqIndex::Done(string Message,unsigned long Size,string Hash,
pkgAcquire::MethodConfig *Cfg)
{
- Item::Done(Message,Size,MD5,Cfg);
+ Item::Done(Message,Size,Hash,Cfg);
if (Decompression == true)
{
if (_config->FindB("Debug::pkgAcquire::Auth", false))
{
- std::cerr << std::endl << RealURI << ": Computed MD5: " << MD5;
- std::cerr << " Expected MD5: " << ExpectedMD5 << std::endl;
- }
-
- if (MD5.empty())
- {
- MD5Summation sum;
- FileFd Fd(DestFile, FileFd::ReadOnly);
- sum.AddFD(Fd.Fd(), Fd.Size());
- Fd.Close();
- MD5 = (string)sum.Result();
+ std::cerr << std::endl << RealURI << ": Computed Hash: " << Hash;
+ std::cerr << " Expected Hash: " << ExpectedHash.toStr() << std::endl;
}
- if (!ExpectedMD5.empty() && MD5 != ExpectedMD5)
+ if (!ExpectedHash.empty() && ExpectedHash.toStr() != Hash)
{
Status = StatAuthError;
- ErrorText = _("MD5Sum mismatch");
+ ErrorText = _("Hash Sum mismatch");
Rename(DestFile,DestFile + ".FAILED");
return;
}
{
// The files timestamp matches
if (StringToBool(LookupTag(Message,"Alt-IMS-Hit"),false) == true)
- {
- unlink(FileName.c_str());
return;
- }
Decompression = true;
Local = true;
DestFile += ".decomp";
// The files timestamp matches
if (StringToBool(LookupTag(Message,"IMS-Hit"),false) == true)
- {
- unlink(FileName.c_str());
return;
- }
if (FileName == DestFile)
Erase = true;
else
Local = true;
- string compExt = Desc.URI.substr(Desc.URI.size()-3);
- char *decompProg;
- if(compExt == "bz2")
- decompProg = "bzip2";
- else if(compExt == ".gz")
- decompProg = "gzip";
+ string compExt = flExtension(flNotDir(URI(Desc.URI).Path));
+ string decompProg;
+
+   // get the binary name for the used compression type
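+   // (e.g. a stock configuration maps Acquire::CompressionTypes::bz2
+   //  to "bzip2" and ::gz to "gzip"; the exact mapping depends on the
+   //  local configuration)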
+ decompProg = _config->Find(string("Acquire::CompressionTypes::").append(compExt),"");
+   if(decompProg.empty() == false)
+      ; // a method binary is configured for this extension, use it
+   // flExtension returns the full name if no extension is found,
+   // this is why we have this complicated compare operation here
+   // FIXME: add a new flJustExtension() that returns "" if no
+   //        extension is found and use that above so that it can
+   //        be tested against ""
+   else if(compExt == flNotDir(URI(Desc.URI).Path))
+      decompProg = "copy";
else {
_error->Error("Unsupported extension: %s", compExt.c_str());
return;
Decompression = true;
DestFile += ".decomp";
- Desc.URI = string(decompProg) + ":" + FileName;
+ Desc.URI = decompProg + ":" + FileName;
QueueURI(Desc);
- Mode = decompProg;
+ Mode = decompProg.c_str();
}
-
-pkgAcqMetaSig::pkgAcqMetaSig(pkgAcquire *Owner,
+ /*}}}*/
+// AcqIndexTrans::pkgAcqIndexTrans - Constructor /*{{{*/
+// ---------------------------------------------------------------------
+/* The Translation file is added to the queue */
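+/* It is a plain pkgAcqIndex constructed with an empty expected hash, so
+   the downloaded file is never rejected by the hash check, and Failed()
+   below keeps a missing Translation file from being reported as an error. */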
+pkgAcqIndexTrans::pkgAcqIndexTrans(pkgAcquire *Owner,
+ string URI,string URIDesc,string ShortDesc)
+ : pkgAcqIndex(Owner, URI, URIDesc, ShortDesc, HashString(), "")
+{
+}
+ /*}}}*/
+// AcqIndexTrans::Failed - Silence failure messages for missing files /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+void pkgAcqIndexTrans::Failed(string Message,pkgAcquire::MethodConfig *Cnf)
+{
+ if (Cnf->LocalOnly == true ||
+ StringToBool(LookupTag(Message,"Transient-Failure"),false) == false)
+ {
+ // Ignore this
+ Status = StatDone;
+ Complete = false;
+ Dequeue();
+ return;
+ }
+
+ Item::Failed(Message,Cnf);
+}
+ /*}}}*/
+pkgAcqMetaSig::pkgAcqMetaSig(pkgAcquire *Owner, /*{{{*/
string URI,string URIDesc,string ShortDesc,
string MetaIndexURI, string MetaIndexURIDesc,
string MetaIndexShortDesc,
DestFile = _config->FindDir("Dir::State::lists") + "partial/";
DestFile += URItoFileName(URI);
- // remove any partial downloaded sig-file. it may confuse proxies
- // and is too small to warrant a partial download anyway
+   // remove any partially downloaded sig-file in partial/.
+ // it may confuse proxies and is too small to warrant a
+ // partial download anyway
unlink(DestFile.c_str());
// Create the item
Desc.Owner = this;
Desc.ShortDesc = ShortDesc;
Desc.URI = URI;
-
string Final = _config->FindDir("Dir::State::lists");
Final += URItoFileName(RealURI);
struct stat Buf;
if (stat(Final.c_str(),&Buf) == 0)
{
- // File was already in place. It needs to be re-verified
- // because Release might have changed, so Move it into partial
- Rename(Final,DestFile);
+      // File was already in place. It needs to be re-downloaded/verified
+      // because Release might have changed; we give it a different
+      // name than DestFile because otherwise the http method would
+      // send If-Range requests and there are too many broken servers
+      // out there that do not understand them
+ LastGoodSig = DestFile+".reverify";
+ Rename(Final,LastGoodSig);
}
QueueURI(Desc);
string pkgAcqMetaSig::Custom600Headers()
{
struct stat Buf;
- if (stat(DestFile.c_str(),&Buf) != 0)
+ if (stat(LastGoodSig.c_str(),&Buf) != 0)
return "\nIndex-File: true";
return "\nIndex-File: true\nLast-Modified: " + TimeRFC1123(Buf.st_mtime);
Complete = true;
+   // put the last known good file back on an IMS hit (it will
+   // be re-verified later)
+ // Else do nothing, we have the new file in DestFile then
+ if(StringToBool(LookupTag(Message,"IMS-Hit"),false) == true)
+ Rename(LastGoodSig, DestFile);
+
// queue a pkgAcqMetaIndex to be verified against the sig we just retrieved
new pkgAcqMetaIndex(Owner, MetaIndexURI, MetaIndexURIDesc, MetaIndexShortDesc,
DestFile, IndexTargets, MetaIndexParser);
}
/*}}}*/
-void pkgAcqMetaSig::Failed(string Message,pkgAcquire::MethodConfig *Cnf)
+void pkgAcqMetaSig::Failed(string Message,pkgAcquire::MethodConfig *Cnf)/*{{{*/
{
+ string Final = _config->FindDir("Dir::State::lists") + URItoFileName(RealURI);
// if we get a network error we fail gracefully
- if(LookupTag(Message,"FailReason") == "Timeout" ||
- LookupTag(Message,"FailReason") == "TmpResolveFailure" ||
- LookupTag(Message,"FailReason") == "ConnectionRefused") {
+ if(Status == StatTransientNetworkError)
+ {
Item::Failed(Message,Cnf);
+      // move the last known good sigfile back on transient network failures
+      if(FileExists(LastGoodSig))
+	 Rename(LastGoodSig,Final);
+
+      // set the status back to transient network error, Item::Failed likes to reset it
+ Status = pkgAcquire::Item::StatTransientNetworkError;
return;
}
// Delete any existing sigfile when the acquire failed
- string Final = _config->FindDir("Dir::State::lists") + URItoFileName(RealURI);
unlink(Final.c_str());
// queue a pkgAcqMetaIndex with no sigfile
Item::Failed(Message,Cnf);
}
-
-pkgAcqMetaIndex::pkgAcqMetaIndex(pkgAcquire *Owner,
+ /*}}}*/
+pkgAcqMetaIndex::pkgAcqMetaIndex(pkgAcquire *Owner, /*{{{*/
string URI,string URIDesc,string ShortDesc,
string SigFile,
const vector<struct IndexTarget*>* IndexTargets,
indexRecords* MetaIndexParser) :
- Item(Owner), RealURI(URI), SigFile(SigFile), AuthPass(false),
- MetaIndexParser(MetaIndexParser), IndexTargets(IndexTargets), IMSHit(false)
+ Item(Owner), RealURI(URI), SigFile(SigFile), IndexTargets(IndexTargets),
+ MetaIndexParser(MetaIndexParser), AuthPass(false), IMSHit(false)
{
DestFile = _config->FindDir("Dir::State::lists") + "partial/";
DestFile += URItoFileName(URI);
QueueURI(Desc);
}
-
/*}}}*/
// pkgAcqMetaIndex::Custom600Headers - Insert custom request headers /*{{{*/
// ---------------------------------------------------------------------
return "\nIndex-File: true\nLast-Modified: " + TimeRFC1123(Buf.st_mtime);
}
-
-void pkgAcqMetaIndex::Done(string Message,unsigned long Size,string MD5,
+ /*}}}*/
+void pkgAcqMetaIndex::Done(string Message,unsigned long Size,string Hash, /*{{{*/
pkgAcquire::MethodConfig *Cfg)
{
- Item::Done(Message,Size,MD5,Cfg);
+ Item::Done(Message,Size,Hash,Cfg);
// MetaIndexes are done in two passes: one to download the
// metaindex with an appropriate method, and a second to verify it
}
}
}
-
-void pkgAcqMetaIndex::RetrievalDone(string Message)
+ /*}}}*/
+void pkgAcqMetaIndex::RetrievalDone(string Message) /*{{{*/
{
// We have just finished downloading a Release file (it is not
// verified yet)
chmod(FinalFile.c_str(),0644);
DestFile = FinalFile;
}
-
-void pkgAcqMetaIndex::AuthDone(string Message)
+ /*}}}*/
+void pkgAcqMetaIndex::AuthDone(string Message) /*{{{*/
{
// At this point, the gpgv method has succeeded, so there is a
// valid signature from a key in the trusted keyring. We
Rename(SigFile,VerifiedSigFile);
chmod(VerifiedSigFile.c_str(),0644);
}
-
-void pkgAcqMetaIndex::QueueIndexes(bool verify)
+ /*}}}*/
+void pkgAcqMetaIndex::QueueIndexes(bool verify) /*{{{*/
{
for (vector <struct IndexTarget*>::const_iterator Target = IndexTargets->begin();
Target != IndexTargets->end();
Target++)
{
- string ExpectedIndexMD5;
+ HashString ExpectedIndexHash;
if (verify)
{
const indexRecords::checkSum *Record = MetaIndexParser->Lookup((*Target)->MetaKey);
+ (*Target)->MetaKey + " in Meta-index file (malformed Release file?)";
return;
}
- ExpectedIndexMD5 = Record->MD5Hash;
+ ExpectedIndexHash = Record->Hash;
if (_config->FindB("Debug::pkgAcquire::Auth", false))
{
std::cerr << "Queueing: " << (*Target)->URI << std::endl;
- std::cerr << "Expected MD5: " << ExpectedIndexMD5 << std::endl;
+ std::cerr << "Expected Hash: " << ExpectedIndexHash.toStr() << std::endl;
}
- if (ExpectedIndexMD5.empty())
+ if (ExpectedIndexHash.empty())
{
Status = StatAuthError;
- ErrorText = "Unable to find MD5 sum for "
+ ErrorText = "Unable to find hash sum for "
+ (*Target)->MetaKey + " in Meta-index file";
return;
}
}
-
- // Queue Packages file
- new pkgAcqIndex(Owner, (*Target)->URI, (*Target)->Description,
- (*Target)->ShortDesc, ExpectedIndexMD5);
+
+      /* Queue the Packages file (either the diff or the full file, depending
+         on the user's option) - we also check if the PDiff Index file is listed
+         in the Meta-Index file. Ideally pkgAcqDiffIndex would test this
+         instead, but passing the required info to it is too much hassle */
+ if(_config->FindB("Acquire::PDiffs",true) == true && (verify == false ||
+ MetaIndexParser->Exists(string((*Target)->MetaKey).append(".diff/Index")) == true))
+ new pkgAcqDiffIndex(Owner, (*Target)->URI, (*Target)->Description,
+ (*Target)->ShortDesc, ExpectedIndexHash);
+ else
+ new pkgAcqIndex(Owner, (*Target)->URI, (*Target)->Description,
+ (*Target)->ShortDesc, ExpectedIndexHash);
}
}
-
-bool pkgAcqMetaIndex::VerifyVendor(string Message)
+ /*}}}*/
+bool pkgAcqMetaIndex::VerifyVendor(string Message) /*{{{*/
{
// // Maybe this should be made available from above so we don't have
// // to read and parse it every time?
return true;
}
- /*}}}*/
-// pkgAcqMetaIndex::Failed - no Release file present or no signature
-// file present /*{{{*/
+ /*}}}*/
+// pkgAcqMetaIndex::Failed - no Release file present or no signature file present /*{{{*/
// ---------------------------------------------------------------------
/* */
void pkgAcqMetaIndex::Failed(string Message,pkgAcquire::MethodConfig *Cnf)
// back to queueing Packages files without verification
QueueIndexes(false);
}
-
/*}}}*/
-
// AcqArchive::AcqArchive - Constructor /*{{{*/
// ---------------------------------------------------------------------
/* This just sets up the initial fetch environment and queues the first
the archive is already available in the cache and stashs the MD5 for
checking later. */
bool pkgAcqArchive::QueueNext()
-{
+{
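+   // Acquire::ForceHash can name a single hash type ("sha256", "sha1" or
+   // "md5sum") to be used instead of the strongest one found in the index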
+ string const ForceHash = _config->Find("Acquire::ForceHash");
for (; Vf.end() == false; Vf++)
{
// Ignore not source sources
return false;
string PkgFile = Parse.FileName();
- MD5 = Parse.MD5Hash();
+ if (ForceHash.empty() == false)
+ {
+ if(stringcasecmp(ForceHash, "sha256") == 0)
+ ExpectedHash = HashString("SHA256", Parse.SHA256Hash());
+ else if (stringcasecmp(ForceHash, "sha1") == 0)
+ ExpectedHash = HashString("SHA1", Parse.SHA1Hash());
+ else
+ ExpectedHash = HashString("MD5Sum", Parse.MD5Hash());
+ }
+ else
+ {
+ string Hash;
+ if ((Hash = Parse.SHA256Hash()).empty() == false)
+ ExpectedHash = HashString("SHA256", Hash);
+ else if ((Hash = Parse.SHA1Hash()).empty() == false)
+ ExpectedHash = HashString("SHA1", Hash);
+ else
+ ExpectedHash = HashString("MD5Sum", Parse.MD5Hash());
+ }
if (PkgFile.empty() == true)
return _error->Error(_("The package index files are corrupted. No Filename: "
"field for package %s."),
// AcqArchive::Done - Finished fetching /*{{{*/
// ---------------------------------------------------------------------
/* */
-void pkgAcqArchive::Done(string Message,unsigned long Size,string Md5Hash,
+void pkgAcqArchive::Done(string Message,unsigned long Size,string CalcHash,
pkgAcquire::MethodConfig *Cfg)
{
- Item::Done(Message,Size,Md5Hash,Cfg);
+ Item::Done(Message,Size,CalcHash,Cfg);
// Check the size
if (Size != Version->Size)
return;
}
- // Check the md5
- if (Md5Hash.empty() == false && MD5.empty() == false)
+ // Check the hash
+ if(ExpectedHash.toStr() != CalcHash)
{
- if (Md5Hash != MD5)
- {
- Status = StatError;
- ErrorText = _("MD5Sum mismatch");
- if(FileExists(DestFile))
- Rename(DestFile,DestFile + ".FAILED");
- return;
- }
+ Status = StatError;
+ ErrorText = _("Hash Sum mismatch");
+ if(FileExists(DestFile))
+ Rename(DestFile,DestFile + ".FAILED");
+ return;
}
// Grab the output filename
}
}
/*}}}*/
-// AcqArchive::IsTrusted - Determine whether this archive comes from a
-// trusted source /*{{{*/
+// AcqArchive::IsTrusted - Determine whether this archive comes from a trusted source /*{{{*/
// ---------------------------------------------------------------------
bool pkgAcqArchive::IsTrusted()
{
return Trusted;
}
-
+ /*}}}*/
// AcqArchive::Finished - Fetching has finished, tidy up /*{{{*/
// ---------------------------------------------------------------------
/* */
StoreFilename = string();
}
/*}}}*/
-
// AcqFile::pkgAcqFile - Constructor /*{{{*/
// ---------------------------------------------------------------------
/* The file is added to the queue */
-pkgAcqFile::pkgAcqFile(pkgAcquire *Owner,string URI,string MD5,
+pkgAcqFile::pkgAcqFile(pkgAcquire *Owner,string URI,string Hash,
unsigned long Size,string Dsc,string ShortDesc,
- const string &DestDir, const string &DestFilename) :
- Item(Owner), Md5Hash(MD5)
+ const string &DestDir, const string &DestFilename,
+ bool IsIndexFile) :
+ Item(Owner), ExpectedHash(Hash), IsIndexFile(IsIndexFile)
{
Retries = _config->FindI("Acquire::Retries",0);
// AcqFile::Done - Item downloaded OK /*{{{*/
// ---------------------------------------------------------------------
/* */
-void pkgAcqFile::Done(string Message,unsigned long Size,string MD5,
+void pkgAcqFile::Done(string Message,unsigned long Size,string CalcHash,
pkgAcquire::MethodConfig *Cnf)
{
- // Check the md5
- if (Md5Hash.empty() == false && MD5.empty() == false)
+ Item::Done(Message,Size,CalcHash,Cnf);
+
+ // Check the hash
+ if(!ExpectedHash.empty() && ExpectedHash.toStr() != CalcHash)
{
- if (Md5Hash != MD5)
- {
- Status = StatError;
- ErrorText = "MD5Sum mismatch";
- Rename(DestFile,DestFile + ".FAILED");
- return;
- }
+ Status = StatError;
+ ErrorText = "Hash Sum mismatch";
+ Rename(DestFile,DestFile + ".FAILED");
+ return;
}
- Item::Done(Message,Size,MD5,Cnf);
-
string FileName = LookupTag(Message,"Filename");
if (FileName.empty() == true)
{
Item::Failed(Message,Cnf);
}
/*}}}*/
+// AcqFile::Custom600Headers - Insert custom request headers		/*{{{*/
+// ---------------------------------------------------------------------
+/* The only header we use is the last-modified header. */
+string pkgAcqFile::Custom600Headers()
+{
+ if (IsIndexFile)
+ return "\nIndex-File: true";
+ return "";
+}
+ /*}}}*/