using namespace std;
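+// Helper to dump the expected vs. actual hashes to stderr whenever the
+// Debug::Acquire::HashSumMismatch option is set; used by the hashsum
+// checks in the various Done() methods below.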
+static void printHashSumComparison(std::string const &URI, HashStringList const &Expected, HashStringList const &Actual) /*{{{*/
+{
+ if (_config->FindB("Debug::Acquire::HashSumMismatch", false) == false)
+ return;
+ std::cerr << std::endl << URI << ":" << std::endl << " Expected Hash: " << std::endl;
+ for (HashStringList::const_iterator hs = Expected.begin(); hs != Expected.end(); ++hs)
+ std::cerr << "\t- " << hs->toStr() << std::endl;
+ std::cerr << " Actual Hash: " << std::endl;
+ for (HashStringList::const_iterator hs = Actual.begin(); hs != Actual.end(); ++hs)
+ std::cerr << "\t- " << hs->toStr() << std::endl;
+}
+ /*}}}*/
+
// Acquire::Item::Item - Constructor /*{{{*/
-// ---------------------------------------------------------------------
-/* */
-pkgAcquire::Item::Item(pkgAcquire *Owner) : Owner(Owner), FileSize(0),
- PartialSize(0), Mode(0), ID(0), Complete(false),
- Local(false), QueueCounter(0)
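+/* silence -Wdeprecated-declarations: during the transition deprecated
+   members are kept around for ABI compatibility and this code still has
+   to touch them (compare the ActiveSubprocess/Mode pairs below) */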
+#if __GNUC__ >= 4
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+#endif
+pkgAcquire::Item::Item(pkgAcquire *Owner, HashStringList const &ExpectedHashes) :
+ Owner(Owner), FileSize(0), PartialSize(0), Mode(0), ID(0), Complete(false),
+ Local(false), QueueCounter(0), ExpectedAdditionalItems(0),
+ ExpectedHashes(ExpectedHashes)
{
Owner->Add(this);
Status = StatIdle;
}
+#if __GNUC__ >= 4
+ #pragma GCC diagnostic pop
+#endif
/*}}}*/
// Acquire::Item::~Item - Destructor /*{{{*/
// ---------------------------------------------------------------------
// Acquire::Item::Done - Item downloaded OK /*{{{*/
// ---------------------------------------------------------------------
/* */
-void pkgAcquire::Item::Done(string Message,unsigned long long Size,string /*Hash*/,
+void pkgAcquire::Item::Done(string Message,unsigned long long Size,HashStringList const &/*Hash*/,
pkgAcquire::MethodConfig * /*Cnf*/)
{
// We just downloaded something..
possibly query additional files */
pkgAcqSubIndex::pkgAcqSubIndex(pkgAcquire *Owner, string const &URI,
string const &URIDesc, string const &ShortDesc,
- HashString const &ExpectedHash)
- : Item(Owner), ExpectedHash(ExpectedHash)
+ HashStringList const &ExpectedHashes)
+ : Item(Owner, ExpectedHashes)
{
 /* XXX: Beware: Currently this class does nothing (of value) anymore! */
Debug = _config->FindB("Debug::pkgAcquire::SubIndex",false);
// AcqSubIndex::Custom600Headers - Insert custom request headers /*{{{*/
// ---------------------------------------------------------------------
/* The only header we use is the last-modified header. */
-string pkgAcqSubIndex::Custom600Headers()
+string pkgAcqSubIndex::Custom600Headers() const
{
string Final = _config->FindDir("Dir::State::lists");
Final += URItoFileName(Desc.URI);
// No good Index is provided
}
/*}}}*/
-void pkgAcqSubIndex::Done(string Message,unsigned long long Size,string Md5Hash, /*{{{*/
+void pkgAcqSubIndex::Done(string Message,unsigned long long Size,HashStringList const &Hashes, /*{{{*/
pkgAcquire::MethodConfig *Cnf)
{
if(Debug)
return;
}
- Item::Done(Message,Size,Md5Hash,Cnf);
+ Item::Done(Message, Size, Hashes, Cnf);
string FinalFile = _config->FindDir("Dir::State::lists")+URItoFileName(Desc.URI);
* the original packages file
*/
pkgAcqDiffIndex::pkgAcqDiffIndex(pkgAcquire *Owner,
- string URI,string URIDesc,string ShortDesc,
- HashString ExpectedHash)
- : Item(Owner), RealURI(URI), ExpectedHash(ExpectedHash),
- Description(URIDesc)
+ IndexTarget const * const Target,
+ HashStringList const &ExpectedHashes,
+ indexRecords *MetaIndexParser)
+ : pkgAcqBaseIndex(Owner, Target, ExpectedHashes, MetaIndexParser)
{
Debug = _config->FindB("Debug::pkgAcquire::Diffs",false);
- Desc.Description = URIDesc + "/DiffIndex";
+ RealURI = Target->URI;
Desc.Owner = this;
- Desc.ShortDesc = ShortDesc;
- Desc.URI = URI + ".diff/Index";
+ Desc.Description = Target->Description + "/DiffIndex";
+ Desc.ShortDesc = Target->ShortDesc;
+ Desc.URI = Target->URI + ".diff/Index";
DestFile = _config->FindDir("Dir::State::lists") + "partial/";
- DestFile += URItoFileName(URI) + string(".DiffIndex");
+ DestFile += URItoFileName(Desc.URI);
if(Debug)
std::clog << "pkgAcqDiffIndex: " << Desc.URI << std::endl;
Desc.URI.substr(0,strlen("file:/")) == "file:/")
{
// we don't have a pkg file or we don't want to queue
- if(Debug)
- std::clog << "No index file, local or canceld by user" << std::endl;
- Failed("", NULL);
+ Failed("No index file, local file, or cancelled by user", NULL);
return;
}
// AcqIndex::Custom600Headers - Insert custom request headers /*{{{*/
// ---------------------------------------------------------------------
/* The only header we use is the last-modified header. */
-string pkgAcqDiffIndex::Custom600Headers()
+string pkgAcqDiffIndex::Custom600Headers() const
{
string Final = _config->FindDir("Dir::State::lists");
- Final += URItoFileName(RealURI) + string(".IndexDiff");
+ Final += URItoFileName(Desc.URI);
if(Debug)
std::clog << "Custom600Header-IMS: " << Final << std::endl;
/*}}}*/
bool pkgAcqDiffIndex::ParseDiffIndex(string IndexDiffFile) /*{{{*/
{
+ // failing here is fine: our caller will take care of trying to
+ // get the complete file if patching fails
if(Debug)
std::clog << "pkgAcqDiffIndex::ParseIndexDiff() " << IndexDiffFile
<< std::endl;
- pkgTagSection Tags;
- string ServerSha1;
- vector<DiffInfo> available_patches;
-
FileFd Fd(IndexDiffFile,FileFd::ReadOnly);
pkgTagFile TF(&Fd);
if (_error->PendingError() == true)
return false;
- if(TF.Step(Tags) == true)
+ pkgTagSection Tags;
+ if(unlikely(TF.Step(Tags) == false))
+ return false;
+
+ HashStringList ServerHashes;
+ unsigned long long ServerSize = 0;
+
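+ // read the hashes of the current server version from the
+ // "<HashType>-Current" fields, e.g. "SHA256-Current: <hash> <size>"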
+ for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type)
{
- bool found = false;
- DiffInfo d;
- string size;
+ std::string tagname = *type;
+ tagname.append("-Current");
+ std::string const tmp = Tags.FindS(tagname.c_str());
+ if (tmp.empty() == true)
+ continue;
- string const tmp = Tags.FindS("SHA1-Current");
+ string hash;
+ unsigned long long size;
std::stringstream ss(tmp);
- ss >> ServerSha1 >> size;
- unsigned long const ServerSize = atol(size.c_str());
+ ss >> hash >> size;
+ if (unlikely(hash.empty() == true))
+ continue;
+ if (unlikely(ServerSize != 0 && ServerSize != size))
+ continue;
+ ServerHashes.push_back(HashString(*type, hash));
+ ServerSize = size;
+ }
- FileFd fd(CurrentPackagesFile, FileFd::ReadOnly);
- SHA1Summation SHA1;
- SHA1.AddFD(fd);
- string const local_sha1 = SHA1.Result();
+ if (ServerHashes.usable() == false)
+ {
+ if (Debug == true)
+ std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": Did not find a good hashsum in the index" << std::endl;
+ return false;
+ }
- if(local_sha1 == ServerSha1)
- {
- // we have the same sha1 as the server so we are done here
- if(Debug)
- std::clog << "Package file is up-to-date" << std::endl;
- // list cleanup needs to know that this file as well as the already
- // present index is ours, so we create an empty diff to save it for us
- new pkgAcqIndexDiffs(Owner, RealURI, Description, Desc.ShortDesc,
- ExpectedHash, ServerSha1, available_patches);
- return true;
- }
- else
+ if (ServerHashes != HashSums())
+ {
+ if (Debug == true)
+ std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": Index has different hashes than expected by the parser, probably older, so fail pdiffing" << std::endl;
+ return false;
+ }
+
+ if (ServerHashes.VerifyFile(CurrentPackagesFile) == true)
+ {
+ // we have the same hashes as the server so we are done here
+ if(Debug)
+ std::clog << "pkgAcqDiffIndex: Package file is up-to-date" << std::endl;
+ // list cleanup needs to know that this file as well as the already
+ // present index is ours, so we create an empty diff to save it for us
+ new pkgAcqIndexDiffs(Owner, Target, ExpectedHashes, MetaIndexParser);
+ return true;
+ }
+
+ FileFd fd(CurrentPackagesFile, FileFd::ReadOnly);
+ Hashes LocalHashesCalc;
+ LocalHashesCalc.AddFD(fd);
+ HashStringList const LocalHashes = LocalHashesCalc.GetHashStringList();
+
+ if(Debug)
+ std::clog << "Server-Current: " << ServerHashes.find(NULL)->toStr() << " and we start at "
+ << fd.Name() << " " << fd.FileSize() << " " << LocalHashes.find(NULL)->toStr() << std::endl;
+
+ // parse all of (provided) history
+ vector<DiffInfo> available_patches;
+ bool firstAcceptedHashes = true;
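+ // only the first (best) hash type may create new DiffInfo entries;
+ // every further type merely confirms files already in the list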
+ for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type)
+ {
+ if (LocalHashes.find(*type) == NULL)
+ continue;
+
+ std::string tagname = *type;
+ tagname.append("-History");
+ std::string const tmp = Tags.FindS(tagname.c_str());
+ if (tmp.empty() == true)
+ continue;
+
+ string hash, filename;
+ unsigned long long size;
+ std::stringstream ss(tmp);
+
+ while (ss >> hash >> size >> filename)
{
- if(Debug)
- std::clog << "SHA1-Current: " << ServerSha1 << " and we start at "<< fd.Name() << " " << fd.Size() << " " << local_sha1 << std::endl;
+ if (unlikely(hash.empty() == true || filename.empty() == true))
+ continue;
- // check the historie and see what patches we need
- string const history = Tags.FindS("SHA1-History");
- std::stringstream hist(history);
- while(hist >> d.sha1 >> size >> d.file)
+ // see if we have a record for this file already
+ std::vector<DiffInfo>::iterator cur = available_patches.begin();
+ for (; cur != available_patches.end(); ++cur)
{
- // read until the first match is found
- // from that point on, we probably need all diffs
- if(d.sha1 == local_sha1)
- found=true;
- else if (found == false)
+ if (cur->file != filename || unlikely(cur->result_size != size))
continue;
-
- if(Debug)
- std::clog << "Need to get diff: " << d.file << std::endl;
- available_patches.push_back(d);
+ cur->result_hashes.push_back(HashString(*type, hash));
+ break;
}
-
- if (available_patches.empty() == false)
+ if (cur != available_patches.end())
+ continue;
+ if (firstAcceptedHashes == true)
+ {
+ DiffInfo next;
+ next.file = filename;
+ next.result_hashes.push_back(HashString(*type, hash));
+ next.result_size = size;
+ next.patch_size = 0;
+ available_patches.push_back(next);
+ }
+ else
{
- // patching with too many files is rather slow compared to a fast download
- unsigned long const fileLimit = _config->FindI("Acquire::PDiffs::FileLimit", 0);
- if (fileLimit != 0 && fileLimit < available_patches.size())
- {
- if (Debug)
- std::clog << "Need " << available_patches.size() << " diffs (Limit is " << fileLimit
- << ") so fallback to complete download" << std::endl;
- return false;
- }
-
- // see if the patches are too big
- found = false; // it was true and it will be true again at the end
- d = *available_patches.begin();
- string const firstPatch = d.file;
- unsigned long patchesSize = 0;
- std::stringstream patches(Tags.FindS("SHA1-Patches"));
- while(patches >> d.sha1 >> size >> d.file)
- {
- if (firstPatch == d.file)
- found = true;
- else if (found == false)
- continue;
-
- patchesSize += atol(size.c_str());
- }
- unsigned long const sizeLimit = ServerSize * _config->FindI("Acquire::PDiffs::SizeLimit", 100);
- if (sizeLimit > 0 && (sizeLimit/100) < patchesSize)
- {
- if (Debug)
- std::clog << "Need " << patchesSize << " bytes (Limit is " << sizeLimit/100
- << ") so fallback to complete download" << std::endl;
- return false;
- }
+ if (Debug == true)
+ std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": File " << filename
+ << " wasn't in the list for the first parsed hash! (history)" << std::endl;
+ break;
}
}
+ firstAcceptedHashes = false;
+ }
+
+ if (unlikely(available_patches.empty() == true))
+ {
+ if (Debug)
+ std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": "
+ << "Couldn't find any patches for the patch series." << std::endl;
+ return false;
+ }
- // we have something, queue the next diff
- if(found)
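+ // now read size and hashes of the patch files themselves from the
+ // "<HashType>-Patches" fields and attach them to the entries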
+ for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type)
+ {
+ if (LocalHashes.find(*type) == NULL)
+ continue;
+
+ std::string tagname = *type;
+ tagname.append("-Patches");
+ std::string const tmp = Tags.FindS(tagname.c_str());
+ if (tmp.empty() == true)
+ continue;
+
+ string hash, filename;
+ unsigned long long size;
+ std::stringstream ss(tmp);
+
+ while (ss >> hash >> size >> filename)
{
- // queue the diffs
- string::size_type const last_space = Description.rfind(" ");
- if(last_space != string::npos)
- Description.erase(last_space, Description.size()-last_space);
-
- /* decide if we should download patches one by one or in one go:
- The first is good if the server merges patches, but many don't so client
- based merging can be attempt in which case the second is better.
- "bad things" will happen if patches are merged on the server,
- but client side merging is attempt as well */
- bool pdiff_merge = _config->FindB("Acquire::PDiffs::Merge", true);
- if (pdiff_merge == true)
- {
- // reprepro adds this flag if it has merged patches on the server
- std::string const precedence = Tags.FindS("X-Patch-Precedence");
- pdiff_merge = (precedence != "merged");
- }
+ if (unlikely(hash.empty() == true || filename.empty() == true))
+ continue;
- if (pdiff_merge == false)
- new pkgAcqIndexDiffs(Owner, RealURI, Description, Desc.ShortDesc,
- ExpectedHash, ServerSha1, available_patches);
- else
+ // see if we have a record for this file already
+ std::vector<DiffInfo>::iterator cur = available_patches.begin();
+ for (; cur != available_patches.end(); ++cur)
{
- std::vector<pkgAcqIndexMergeDiffs*> *diffs = new std::vector<pkgAcqIndexMergeDiffs*>(available_patches.size());
- for(size_t i = 0; i < available_patches.size(); ++i)
- (*diffs)[i] = new pkgAcqIndexMergeDiffs(Owner, RealURI, Description, Desc.ShortDesc, ExpectedHash,
- available_patches[i], diffs);
+ if (cur->file != filename)
+ continue;
+ if (unlikely(cur->patch_size != 0 && cur->patch_size != size))
+ continue;
+ cur->patch_hashes.push_back(HashString(*type, hash));
+ cur->patch_size = size;
+ break;
}
-
- Complete = false;
- Status = StatDone;
- Dequeue();
- return true;
+ if (cur != available_patches.end())
+ continue;
+ if (Debug == true)
+ std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": File " << filename
+ << " wasn't in the list for the first parsed hash! (patches)" << std::endl;
+ break;
}
}
-
- // Nothing found, report and return false
- // Failing here is ok, if we return false later, the full
- // IndexFile is queued
- if(Debug)
- std::clog << "Can't find a patch in the index file" << std::endl;
- return false;
+
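+ // find our local file in the patch series: every patch before it is
+ // already applied and can be dropped from the list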
+ bool foundStart = false;
+ for (std::vector<DiffInfo>::iterator cur = available_patches.begin();
+ cur != available_patches.end(); ++cur)
+ {
+ if (LocalHashes != cur->result_hashes)
+ continue;
+
+ available_patches.erase(available_patches.begin(), cur);
+ foundStart = true;
+ break;
+ }
+
+ if (foundStart == false || unlikely(available_patches.empty() == true))
+ {
+ if (Debug)
+ std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": "
+ << "Couldn't find the start of the patch series." << std::endl;
+ return false;
+ }
+
+ // patching with too many files is rather slow compared to a fast download
+ unsigned long const fileLimit = _config->FindI("Acquire::PDiffs::FileLimit", 0);
+ if (fileLimit != 0 && fileLimit < available_patches.size())
+ {
+ if (Debug)
+ std::clog << "Need " << available_patches.size() << " diffs (Limit is " << fileLimit
+ << ") so fall back to complete download" << std::endl;
+ return false;
+ }
+
+ // calculate the size of all patches we have to get
+ // note that all sizes are uncompressed, while we download compressed files
+ unsigned long long patchesSize = 0;
+ for (std::vector<DiffInfo>::const_iterator cur = available_patches.begin();
+ cur != available_patches.end(); ++cur)
+ patchesSize += cur->patch_size;
+ unsigned long long const sizeLimit = ServerSize * _config->FindI("Acquire::PDiffs::SizeLimit", 100);
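+ // note: the check below is currently disabled ("false &&"), presumably
+ // because the listed sizes are uncompressed while the downloaded
+ // patches are compressed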
+ if (false && sizeLimit > 0 && (sizeLimit/100) < patchesSize)
+ {
+ if (Debug)
+ std::clog << "Need " << patchesSize << " bytes (Limit is " << sizeLimit/100
+ << ") so fall back to complete download" << std::endl;
+ return false;
+ }
+
+ // we have something, queue the diffs
+ string::size_type const last_space = Description.rfind(" ");
+ if(last_space != string::npos)
+ Description.erase(last_space, Description.size()-last_space);
+
+ /* decide if we should download patches one by one or in one go:
+    The first is good if the server merges patches, but many don't, so
+    client-based merging can be attempted, in which case the second is better.
+    "Bad things" will happen if patches are merged on the server,
+    but client-side merging is attempted as well. */
+ bool pdiff_merge = _config->FindB("Acquire::PDiffs::Merge", true);
+ if (pdiff_merge == true)
+ {
+ // reprepro adds this flag if it has merged patches on the server
+ std::string const precedence = Tags.FindS("X-Patch-Precedence");
+ pdiff_merge = (precedence != "merged");
+ }
+
+ if (pdiff_merge == false)
+ {
+ new pkgAcqIndexDiffs(Owner, Target, ExpectedHashes, MetaIndexParser,
+ available_patches);
+ }
+ else
+ {
+ std::vector<pkgAcqIndexMergeDiffs*> *diffs = new std::vector<pkgAcqIndexMergeDiffs*>(available_patches.size());
+ for(size_t i = 0; i < available_patches.size(); ++i)
+ (*diffs)[i] = new pkgAcqIndexMergeDiffs(Owner, Target,
+ ExpectedHashes,
+ MetaIndexParser,
+ available_patches[i],
+ diffs);
+ }
+
+ Complete = false;
+ Status = StatDone;
+ Dequeue();
+ return true;
}
/*}}}*/
void pkgAcqDiffIndex::Failed(string Message,pkgAcquire::MethodConfig * /*Cnf*/)/*{{{*/
std::clog << "pkgAcqDiffIndex failed: " << Desc.URI << " with " << Message << std::endl
<< "Falling back to normal index file acquire" << std::endl;
- new pkgAcqIndex(Owner, RealURI, Description, Desc.ShortDesc,
- ExpectedHash);
+ new pkgAcqIndex(Owner, Target, ExpectedHashes, MetaIndexParser);
Complete = false;
Status = StatDone;
Dequeue();
}
/*}}}*/
-void pkgAcqDiffIndex::Done(string Message,unsigned long long Size,string Md5Hash, /*{{{*/
+void pkgAcqDiffIndex::Done(string Message,unsigned long long Size,HashStringList const &Hashes, /*{{{*/
pkgAcquire::MethodConfig *Cnf)
{
if(Debug)
std::clog << "pkgAcqDiffIndex::Done(): " << Desc.URI << std::endl;
- Item::Done(Message,Size,Md5Hash,Cnf);
+ Item::Done(Message, Size, Hashes, Cnf);
string FinalFile;
FinalFile = _config->FindDir("Dir::State::lists")+URItoFileName(RealURI);
DestFile = FinalFile;
if(!ParseDiffIndex(DestFile))
- return Failed("", NULL);
+ return Failed("Parsing pdiff Index failed", NULL);
Complete = true;
Status = StatDone;
* for each diff and the index
*/
pkgAcqIndexDiffs::pkgAcqIndexDiffs(pkgAcquire *Owner,
- string URI,string URIDesc,string ShortDesc,
- HashString ExpectedHash,
- string ServerSha1,
+ struct IndexTarget const * const Target,
+ HashStringList const &ExpectedHashes,
+ indexRecords *MetaIndexParser,
vector<DiffInfo> diffs)
- : Item(Owner), RealURI(URI), ExpectedHash(ExpectedHash),
- available_patches(diffs), ServerSha1(ServerSha1)
+ : pkgAcqBaseIndex(Owner, Target, ExpectedHashes, MetaIndexParser),
+ available_patches(diffs)
{
-
DestFile = _config->FindDir("Dir::State::lists") + "partial/";
- DestFile += URItoFileName(URI);
+ DestFile += URItoFileName(Target->URI);
Debug = _config->FindB("Debug::pkgAcquire::Diffs",false);
- Description = URIDesc;
+ RealURI = Target->URI;
Desc.Owner = this;
- Desc.ShortDesc = ShortDesc;
+ Description = Target->Description;
+ Desc.ShortDesc = Target->ShortDesc;
if(available_patches.empty() == true)
{
if(Debug)
std::clog << "pkgAcqIndexDiffs failed: " << Desc.URI << " with " << Message << std::endl
<< "Falling back to normal index file acquire" << std::endl;
- new pkgAcqIndex(Owner, RealURI, Description,Desc.ShortDesc,
- ExpectedHash);
+ new pkgAcqIndex(Owner, Target, ExpectedHashes, MetaIndexParser);
Finish();
}
/*}}}*/
DestFile = _config->FindDir("Dir::State::lists");
DestFile += URItoFileName(RealURI);
- if(!ExpectedHash.empty() && !ExpectedHash.VerifyFile(DestFile))
+ if(HashSums().usable() && !HashSums().VerifyFile(DestFile))
{
RenameOnError(HashSumMismatch);
Dequeue();
FinalFile += URItoFileName(RealURI);
FileFd fd(FinalFile, FileFd::ReadOnly);
- SHA1Summation SHA1;
- SHA1.AddFD(fd);
- string local_sha1 = string(SHA1.Result());
+ Hashes LocalHashesCalc;
+ LocalHashesCalc.AddFD(fd);
+ HashStringList const LocalHashes = LocalHashesCalc.GetHashStringList();
+
-if(Debug)
- std::clog << "QueueNextDiff: "
- << FinalFile << " (" << local_sha1 << ")"<<std::endl;
+ // check usability first: find(NULL) below would return NULL (and crash
+ // the debug output) on a list without a best hash
+ if (unlikely(LocalHashes.usable() == false || ExpectedHashes.usable() == false))
+ {
+ Failed("Local/Expected hashes are not usable", NULL);
+ return false;
+ }
+
+ if(Debug)
+ std::clog << "QueueNextDiff: " << FinalFile << " (" << LocalHashes.find(NULL)->toStr() << ")" << std::endl;
// final file reached before all patches are applied
- if(local_sha1 == ServerSha1)
+ if(LocalHashes == ExpectedHashes)
{
Finish(true);
return true;
// remove all patches until the next matching patch is found
// this requires the Index file to be ordered
+// re-fetch begin() after each erase: the erased iterator is invalid and
+// reusing it would (in practice) skip over entries
-for(vector<DiffInfo>::iterator I=available_patches.begin();
+for(vector<DiffInfo>::iterator I = available_patches.begin();
    available_patches.empty() == false &&
    I != available_patches.end() &&
-   I->sha1 != local_sha1;
+   I->result_hashes != LocalHashes;
-    ++I)
+    I = available_patches.begin())
{
    available_patches.erase(I);
// error checking and falling back if no patch was found
if(available_patches.empty() == true)
{
- Failed("", NULL);
+ Failed("No patches left to reach target", NULL);
return false;
}
if(Debug)
std::clog << "pkgAcqIndexDiffs::QueueNextDiff(): " << Desc.URI << std::endl;
-
+
QueueURI(Desc);
return true;
}
/*}}}*/
-void pkgAcqIndexDiffs::Done(string Message,unsigned long long Size,string Md5Hash, /*{{{*/
+void pkgAcqIndexDiffs::Done(string Message,unsigned long long Size, HashStringList const &Hashes, /*{{{*/
pkgAcquire::MethodConfig *Cnf)
{
if(Debug)
std::clog << "pkgAcqIndexDiffs::Done(): " << Desc.URI << std::endl;
- Item::Done(Message,Size,Md5Hash,Cnf);
+ Item::Done(Message, Size, Hashes, Cnf);
string FinalFile;
FinalFile = _config->FindDir("Dir::State::lists")+URItoFileName(RealURI);
// success in downloading a diff, enter ApplyDiff state
if(State == StateFetchDiff)
{
+ FileFd fd(DestFile, FileFd::ReadOnly, FileFd::Gzip);
+ class Hashes LocalHashesCalc;
+ LocalHashesCalc.AddFD(fd);
+ HashStringList const LocalHashes = LocalHashesCalc.GetHashStringList();
+
+ if (fd.Size() != available_patches[0].patch_size ||
+ available_patches[0].patch_hashes != LocalHashes)
+ {
+ Failed("Patch has Size/Hashsum mismatch", NULL);
+ return;
+ }
 // rred expects the patch as $FinalFile.ed
Rename(DestFile,FinalFile+".ed");
Local = true;
Desc.URI = "rred:" + FinalFile;
QueueURI(Desc);
+ ActiveSubprocess = "rred";
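+ // ActiveSubprocess supersedes the deprecated Mode member, which is
+ // only kept in sync below for ABI compatibility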
+#if __GNUC__ >= 4
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+#endif
Mode = "rred";
+#if __GNUC__ >= 4
+ #pragma GCC diagnostic pop
+#endif
return;
}
// see if there is more to download
if(available_patches.empty() == false) {
- new pkgAcqIndexDiffs(Owner, RealURI, Description, Desc.ShortDesc,
- ExpectedHash, ServerSha1, available_patches);
+ new pkgAcqIndexDiffs(Owner, Target,
+ ExpectedHashes, MetaIndexParser,
+ available_patches);
return Finish();
} else
return Finish(true);
/*}}}*/
// AcqIndexMergeDiffs::AcqIndexMergeDiffs - Constructor /*{{{*/
pkgAcqIndexMergeDiffs::pkgAcqIndexMergeDiffs(pkgAcquire *Owner,
- string const &URI, string const &URIDesc,
- string const &ShortDesc, HashString const &ExpectedHash,
- DiffInfo const &patch,
- std::vector<pkgAcqIndexMergeDiffs*> const * const allPatches)
- : Item(Owner), RealURI(URI), ExpectedHash(ExpectedHash),
- patch(patch),allPatches(allPatches), State(StateFetchDiff)
+ struct IndexTarget const * const Target,
+ HashStringList const &ExpectedHashes,
+ indexRecords *MetaIndexParser,
+ DiffInfo const &patch,
+ std::vector<pkgAcqIndexMergeDiffs*> const * const allPatches)
+ : pkgAcqBaseIndex(Owner, Target, ExpectedHashes, MetaIndexParser),
+ patch(patch), allPatches(allPatches), State(StateFetchDiff)
{
DestFile = _config->FindDir("Dir::State::lists") + "partial/";
- DestFile += URItoFileName(URI);
+ DestFile += URItoFileName(Target->URI);
Debug = _config->FindB("Debug::pkgAcquire::Diffs",false);
- Description = URIDesc;
+ RealURI = Target->URI;
Desc.Owner = this;
- Desc.ShortDesc = ShortDesc;
+ Description = Target->Description;
+ Desc.ShortDesc = Target->ShortDesc;
Desc.URI = RealURI + ".diff/" + patch.file + ".gz";
Desc.Description = Description + " " + patch.file + string(".pdiff");
// first failure means we should fallback
State = StateErrorDiff;
std::clog << "Falling back to normal index file acquire" << std::endl;
- new pkgAcqIndex(Owner, RealURI, Description,Desc.ShortDesc,
- ExpectedHash);
+ new pkgAcqIndex(Owner, Target, ExpectedHashes, MetaIndexParser);
}
/*}}}*/
-void pkgAcqIndexMergeDiffs::Done(string Message,unsigned long long Size,string Md5Hash, /*{{{*/
+void pkgAcqIndexMergeDiffs::Done(string Message,unsigned long long Size,HashStringList const &Hashes, /*{{{*/
pkgAcquire::MethodConfig *Cnf)
{
if(Debug)
std::clog << "pkgAcqIndexMergeDiffs::Done(): " << Desc.URI << std::endl;
- Item::Done(Message,Size,Md5Hash,Cnf);
+ Item::Done(Message,Size,Hashes,Cnf);
string const FinalFile = _config->FindDir("Dir::State::lists") + URItoFileName(RealURI);
if (State == StateFetchDiff)
{
+ FileFd fd(DestFile, FileFd::ReadOnly, FileFd::Gzip);
+ class Hashes LocalHashesCalc;
+ LocalHashesCalc.AddFD(fd);
+ HashStringList const LocalHashes = LocalHashesCalc.GetHashStringList();
+
+ if (fd.Size() != patch.patch_size || patch.patch_hashes != LocalHashes)
+ {
+ Failed("Patch has Size/Hashsum mismatch", NULL);
+ return;
+ }
+
// rred expects the patch as $FinalFile.ed.$patchname.gz
Rename(DestFile, FinalFile + ".ed." + patch.file + ".gz");
Local = true;
Desc.URI = "rred:" + FinalFile;
QueueURI(Desc);
+ ActiveSubprocess = "rred";
+#if __GNUC__ >= 4
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+#endif
Mode = "rred";
+#if __GNUC__ >= 4
+ #pragma GCC diagnostic pop
+#endif
return;
}
// success in download/apply all diffs, clean up
else if (State == StateApplyDiff)
{
// see if we really got the expected file
- if(!ExpectedHash.empty() && !ExpectedHash.VerifyFile(DestFile))
+ if(ExpectedHashes.usable() && !ExpectedHashes.VerifyFile(DestFile))
{
RenameOnError(HashSumMismatch);
return;
instantiated to fetch the revision file */
pkgAcqIndex::pkgAcqIndex(pkgAcquire *Owner,
string URI,string URIDesc,string ShortDesc,
- HashString ExpectedHash, string comprExt)
- : Item(Owner), RealURI(URI), ExpectedHash(ExpectedHash)
+ HashStringList const &ExpectedHash, string comprExt)
+ : pkgAcqBaseIndex(Owner, NULL, ExpectedHash, NULL), RealURI(URI)
{
if(comprExt.empty() == true)
{
}
CompressionExtension = comprExt;
- Verify = true;
-
Init(URI, URIDesc, ShortDesc);
}
pkgAcqIndex::pkgAcqIndex(pkgAcquire *Owner, IndexTarget const *Target,
- HashString const &ExpectedHash, indexRecords const *MetaIndexParser)
- : Item(Owner), RealURI(Target->URI), ExpectedHash(ExpectedHash)
+ HashStringList const &ExpectedHash,
+ indexRecords *MetaIndexParser)
+ : pkgAcqBaseIndex(Owner, Target, ExpectedHash, MetaIndexParser),
+ RealURI(Target->URI)
{
// autoselect the compression method
std::vector<std::string> types = APT::Configuration::getCompressionTypes();
CompressionExtension = "";
- if (ExpectedHash.empty() == false)
+ if (ExpectedHashes.usable())
{
for (std::vector<std::string>::const_iterator t = types.begin(); t != types.end(); ++t)
if (*t == "uncompressed" || MetaIndexParser->Exists(string(Target->MetaKey).append(".").append(*t)) == true)
if (CompressionExtension.empty() == false)
CompressionExtension.erase(CompressionExtension.end()-1);
- // only verify non-optional targets, see acquire-item.h for a FIXME
- // to make this more flexible
- if (Target->IsOptional())
- Verify = false;
- else
- Verify = true;
-
Init(Target->URI, Target->Description, Target->ShortDesc);
}
/*}}}*/
DestFile += URItoFileName(URI);
std::string const comprExt = CompressionExtension.substr(0, CompressionExtension.find(' '));
+ std::string MetaKey;
if (comprExt == "uncompressed")
+ {
Desc.URI = URI;
+ if(Target)
+ MetaKey = string(Target->MetaKey);
+ }
else
+ {
Desc.URI = URI + '.' + comprExt;
+ DestFile = DestFile + '.' + comprExt;
+ if(Target)
+ MetaKey = string(Target->MetaKey) + '.' + comprExt;
+ }
+
+ // load the filesize
+ if(MetaIndexParser)
+ {
+ indexRecords::checkSum *Record = MetaIndexParser->Lookup(MetaKey);
+ if(Record)
+ FileSize = Record->Size;
+
+ InitByHashIfNeeded(MetaKey);
+ }
Desc.Description = URIDesc;
Desc.Owner = this;
QueueURI(Desc);
}
/*}}}*/
+// AcqIndex::InitByHashIfNeeded - modify URI for by-hash support /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+void pkgAcqIndex::InitByHashIfNeeded(const std::string MetaKey)
+{
+ // TODO:
+ // - (maybe?) add support for by-hash into the sources.list as flag
+ // - make apt-ftparchive generate the hashes (and expire?)
+ std::string HostKnob = "APT::Acquire::" + ::URI(Desc.URI).Host + "::By-Hash";
+ if(_config->FindB("APT::Acquire::By-Hash", false) == true ||
+ _config->FindB(HostKnob, false) == true ||
+ MetaIndexParser->GetSupportsAcquireByHash())
+ {
+ indexRecords::checkSum *Record = MetaIndexParser->Lookup(MetaKey);
+ if(Record)
+ {
+ // FIXME: should we really use the best hash here? or a fixed one?
+ const HashString *TargetHash = Record->Hashes.find("");
+ if (TargetHash != NULL)
+ {
+    // rewrite the last path component, e.g. ".../Packages.gz"
+    // becomes ".../by-hash/SHA256/<hashvalue>"
+    std::string const ByHash = "/by-hash/" + TargetHash->HashType() + "/" + TargetHash->HashValue();
+    size_t const trailing_slash = Desc.URI.find_last_of("/");
+    Desc.URI = Desc.URI.replace(
+       trailing_slash,
+       Desc.URI.substr(trailing_slash+1).size()+1,
+       ByHash);
+ }
+ } else {
+ _error->Warning(
+ "Fetching ByHash requested but cannot find record for %s",
+ MetaKey.c_str());
+ }
+ }
+}
+ /*}}}*/
// AcqIndex::Custom600Headers - Insert custom request headers /*{{{*/
// ---------------------------------------------------------------------
/* The only header we use is the last-modified header. */
-string pkgAcqIndex::Custom600Headers()
+string pkgAcqIndex::Custom600Headers() const
{
+ std::string const compExt = CompressionExtension.substr(0, CompressionExtension.find(' '));
string Final = _config->FindDir("Dir::State::lists");
Final += URItoFileName(RealURI);
if (_config->FindB("Acquire::GzipIndexes",false))
- Final += ".gz";
+ Final += '.' + compExt;
string msg = "\nIndex-File: true";
- // FIXME: this really should use "IndexTarget::IsOptional()" but that
- // seems to be difficult without breaking ABI
- if (ShortDesc().find("Translation") != 0)
- msg += "\nFail-Ignore: true";
+
struct stat Buf;
if (stat(Final.c_str(),&Buf) == 0)
msg += "\nLast-Modified: " + TimeRFC1123(Buf.st_mtime);
Item::Failed(Message,Cnf);
}
/*}}}*/
+// pkgAcqIndex::GetFinalFilename - Return the full final file path /*{{{*/
+std::string pkgAcqIndex::GetFinalFilename(std::string const &URI,
+ std::string const &compExt)
+{
+ std::string FinalFile = _config->FindDir("Dir::State::lists");
+ FinalFile += URItoFileName(URI);
+ if (_config->FindB("Acquire::GzipIndexes",false) == true)
+ FinalFile += '.' + compExt;
+ return FinalFile;
+}
+ /*}}}*/
+// AcqIndex::ReverifyAfterIMS - Reverify index after an ims-hit /*{{{*/
+void pkgAcqIndex::ReverifyAfterIMS(std::string const &FileName)
+{
+ std::string const compExt = CompressionExtension.substr(0, CompressionExtension.find(' '));
+ if (_config->FindB("Acquire::GzipIndexes",false) == true)
+ DestFile += '.' + compExt;
+
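+ // move the final file back into partial and queue a copy of it so
+ // that it runs through the usual hashsum verification again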
+ string FinalFile = GetFinalFilename(RealURI, compExt);
+ Rename(FinalFile, FileName);
+ Decompression = true;
+ Desc.URI = "copy:" + FileName;
+ QueueURI(Desc);
+}
+ /*}}}*/
// AcqIndex::Done - Finished a fetch /*{{{*/
// ---------------------------------------------------------------------
/* This goes through a number of states.. On the initial fetch the
to the uncompressed version of the file. If this is so the file
is copied into the partial directory. In all other cases the file
is decompressed with a gzip uri. */
-void pkgAcqIndex::Done(string Message,unsigned long long Size,string Hash,
+void pkgAcqIndex::Done(string Message,unsigned long long Size,HashStringList const &Hashes,
pkgAcquire::MethodConfig *Cfg)
{
- Item::Done(Message,Size,Hash,Cfg);
+ Item::Done(Message,Size,Hashes,Cfg);
+ std::string const compExt = CompressionExtension.substr(0, CompressionExtension.find(' '));
if (Decompression == true)
{
- if (_config->FindB("Debug::pkgAcquire::Auth", false))
- {
- std::cerr << std::endl << RealURI << ": Computed Hash: " << Hash;
- std::cerr << " Expected Hash: " << ExpectedHash.toStr() << std::endl;
- }
-
- if (!ExpectedHash.empty() && ExpectedHash.toStr() != Hash)
+ if (ExpectedHashes.usable() && ExpectedHashes != Hashes)
{
+ Desc.URI = RealURI;
RenameOnError(HashSumMismatch);
+ printHashSumComparison(RealURI, ExpectedHashes, Hashes);
return;
}
- /* Verify the index file for correctness (all indexes must
- * have a Package field) (LP: #346386) (Closes: #627642) */
- if (Verify == true)
+ // FIXME: this can go away once we only ever download stuff that
+ // has a valid hash and we never do GET based probing
+ //
+ /* Always verify the index file for correctness (all indexes must
+ * have a Package field) (LP: #346386) (Closes: #627642)
+ */
+ FileFd fd(DestFile, FileFd::ReadOnly, FileFd::Extension);
+ // Only test for correctness if the file is not empty (empty is ok)
+ if (fd.Size() > 0)
{
- FileFd fd(DestFile, FileFd::ReadOnly);
- // Only test for correctness if the file is not empty (empty is ok)
- if (fd.FileSize() > 0)
- {
- pkgTagSection sec;
- pkgTagFile tag(&fd);
-
- // all our current indexes have a field 'Package' in each section
- if (_error->PendingError() == true || tag.Step(sec) == false || sec.Exists("Package") == false)
- {
- RenameOnError(InvalidFormat);
- return;
- }
+ pkgTagSection sec;
+ pkgTagFile tag(&fd);
+
+ // all our current indexes have a field 'Package' in each section
+ if (_error->PendingError() == true || tag.Step(sec) == false || sec.Exists("Package") == false)
+ {
+ RenameOnError(InvalidFormat);
+ return;
}
}
// Done, move it into position
- string FinalFile = _config->FindDir("Dir::State::lists");
- FinalFile += URItoFileName(RealURI);
+ string FinalFile = GetFinalFilename(RealURI, compExt);
Rename(DestFile,FinalFile);
chmod(FinalFile.c_str(),0644);
-
+
/* We restore the original name to DestFile so that the clean operation
will work OK */
DestFile = _config->FindDir("Dir::State::lists") + "partial/";
DestFile += URItoFileName(RealURI);
-
+ if (_config->FindB("Acquire::GzipIndexes",false))
+ DestFile += '.' + compExt;
+
// Remove the compressed version.
if (Erase == true)
unlink(DestFile.c_str());
+
return;
}
string FileName = LookupTag(Message,"Alt-Filename");
if (FileName.empty() == false)
{
- // The files timestamp matches
- if (StringToBool(LookupTag(Message,"Alt-IMS-Hit"),false) == true)
- return;
Decompression = true;
Local = true;
DestFile += ".decomp";
Desc.URI = "copy:" + FileName;
QueueURI(Desc);
+ ActiveSubprocess = "copy";
+#if __GNUC__ >= 4
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+#endif
Mode = "copy";
+#if __GNUC__ >= 4
+ #pragma GCC diagnostic pop
+#endif
return;
}
ErrorText = "Method gave a blank filename";
}
- std::string const compExt = CompressionExtension.substr(0, CompressionExtension.find(' '));
-
- // The files timestamp matches
- if (StringToBool(LookupTag(Message,"IMS-Hit"),false) == true) {
- if (_config->FindB("Acquire::GzipIndexes",false) && compExt == "gz")
- // Update DestFile for .gz suffix so that the clean operation keeps it
- DestFile += ".gz";
- return;
- }
-
if (FileName == DestFile)
Erase = true;
else
Local = true;
-
+
+ // do not reverify cdrom sources as apt-cdrom may rewrite the Packages
+ // file when it's doing the indexcopy
+ if (RealURI.substr(0,6) == "cdrom:" &&
+ StringToBool(LookupTag(Message,"IMS-Hit"),false) == true)
+ return;
+
+ // The file's timestamp matches: for non-local URLs reverify the local
+ // file; for local files uncompress again to ensure the hashsum still
+ // matches the Release file
+ if (!Local && StringToBool(LookupTag(Message,"IMS-Hit"),false) == true)
+ {
+ // set destfile to the final destfile
+ if(_config->FindB("Acquire::GzipIndexes",false) == false)
+ {
+ DestFile = _config->FindDir("Dir::State::lists") + "partial/";
+ DestFile += URItoFileName(RealURI);
+ }
+
+ ReverifyAfterIMS(FileName);
+ return;
+ }
string decompProg;
- // If we enable compressed indexes and already have gzip, keep it
- if (_config->FindB("Acquire::GzipIndexes",false) && compExt == "gz" && !Local) {
- string FinalFile = _config->FindDir("Dir::State::lists");
- FinalFile += URItoFileName(RealURI) + ".gz";
- Rename(DestFile,FinalFile);
- chmod(FinalFile.c_str(),0644);
-
- // Update DestFile for .gz suffix so that the clean operation keeps it
- DestFile = _config->FindDir("Dir::State::lists") + "partial/";
- DestFile += URItoFileName(RealURI) + ".gz";
+ // If we enable compressed indexes, queue for hash verification
+ if (_config->FindB("Acquire::GzipIndexes",false))
+ {
+ DestFile = _config->FindDir("Dir::State::lists");
+ DestFile += URItoFileName(RealURI) + '.' + compExt;
+
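+ // the copy method recalculates the hashes, so the kept compressed
+ // index is still verified against the expected hashes in Done()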
+ Decompression = true;
+ Desc.URI = "copy:" + FileName;
+ QueueURI(Desc);
+
return;
}
Desc.URI = decompProg + ":" + FileName;
QueueURI(Desc);
- // FIXME: this points to a c++ string that goes out of scope
- Mode = decompProg.c_str();
+ ActiveSubprocess = decompProg;
+#if __GNUC__ >= 4
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+#endif
+ Mode = ActiveSubprocess.c_str();
+#if __GNUC__ >= 4
+ #pragma GCC diagnostic pop
+#endif
}
/*}}}*/
// AcqIndexTrans::pkgAcqIndexTrans - Constructor /*{{{*/
/* The Translation file is added to the queue */
pkgAcqIndexTrans::pkgAcqIndexTrans(pkgAcquire *Owner,
string URI,string URIDesc,string ShortDesc)
- : pkgAcqIndex(Owner, URI, URIDesc, ShortDesc, HashString(), "")
+ : pkgAcqIndex(Owner, URI, URIDesc, ShortDesc, HashStringList(), "")
{
}
-pkgAcqIndexTrans::pkgAcqIndexTrans(pkgAcquire *Owner, IndexTarget const *Target,
- HashString const &ExpectedHash, indexRecords const *MetaIndexParser)
- : pkgAcqIndex(Owner, Target, ExpectedHash, MetaIndexParser)
+pkgAcqIndexTrans::pkgAcqIndexTrans(pkgAcquire *Owner, IndexTarget const * const Target,
+ HashStringList const &ExpectedHashes, indexRecords *MetaIndexParser)
+ : pkgAcqIndex(Owner, Target, ExpectedHashes, MetaIndexParser)
{
+ // load the filesize
+ indexRecords::checkSum *Record = MetaIndexParser->Lookup(string(Target->MetaKey));
+ if(Record)
+ FileSize = Record->Size;
}
/*}}}*/
// AcqIndexTrans::Custom600Headers - Insert custom request headers /*{{{*/
// ---------------------------------------------------------------------
-string pkgAcqIndexTrans::Custom600Headers()
+string pkgAcqIndexTrans::Custom600Headers() const
{
+ std::string const compExt = CompressionExtension.substr(0, CompressionExtension.find(' '));
string Final = _config->FindDir("Dir::State::lists");
Final += URItoFileName(RealURI);
+ if (_config->FindB("Acquire::GzipIndexes",false))
+ Final += '.' + compExt;
struct stat Buf;
if (stat(Final.c_str(),&Buf) != 0)
string MetaIndexShortDesc,
const vector<IndexTarget*>* IndexTargets,
indexRecords* MetaIndexParser) :
- Item(Owner), RealURI(URI), MetaIndexURI(MetaIndexURI),
+ Item(Owner, HashStringList()), RealURI(URI), MetaIndexURI(MetaIndexURI),
MetaIndexURIDesc(MetaIndexURIDesc), MetaIndexShortDesc(MetaIndexShortDesc),
MetaIndexParser(MetaIndexParser), IndexTargets(IndexTargets)
{
Rename(Final,LastGoodSig);
}
+ // we expect the index targets + one additional Release file
+ ExpectedAdditionalItems = IndexTargets->size() + 1;
+
QueueURI(Desc);
}
/*}}}*/
// pkgAcqMetaSig::Custom600Headers - Insert custom request headers /*{{{*/
// ---------------------------------------------------------------------
/* The only header we use is the last-modified header. */
-string pkgAcqMetaSig::Custom600Headers()
+string pkgAcqMetaSig::Custom600Headers() const
{
struct stat Buf;
if (stat(LastGoodSig.c_str(),&Buf) != 0)
return "\nIndex-File: true\nLast-Modified: " + TimeRFC1123(Buf.st_mtime);
}
-void pkgAcqMetaSig::Done(string Message,unsigned long long Size,string MD5,
+void pkgAcqMetaSig::Done(string Message,unsigned long long Size, HashStringList const &Hashes,
pkgAcquire::MethodConfig *Cfg)
{
- Item::Done(Message,Size,MD5,Cfg);
+ Item::Done(Message, Size, Hashes, Cfg);
string FileName = LookupTag(Message,"Filename");
if (FileName.empty() == true)
Complete = true;
+ // at this point pkgAcqMetaIndex takes over
+ ExpectedAdditionalItems = 0;
+
// put the last known good file back on i-m-s hit (it will
// be re-verified again)
// Else do nothing, we have the new file in DestFile then
{
string Final = _config->FindDir("Dir::State::lists") + URItoFileName(RealURI);
+ // at this point pkgAcqMetaIndex takes over
+ ExpectedAdditionalItems = 0;
+
// if we get a network error we fail gracefully
if(Status == StatTransientNetworkError)
{
pkgAcqMetaIndex::pkgAcqMetaIndex(pkgAcquire *Owner, /*{{{*/
string URI,string URIDesc,string ShortDesc,
string SigFile,
- const vector<struct IndexTarget*>* IndexTargets,
+ const vector<IndexTarget*>* IndexTargets,
indexRecords* MetaIndexParser) :
- Item(Owner), RealURI(URI), SigFile(SigFile), IndexTargets(IndexTargets),
+ Item(Owner, HashStringList()), RealURI(URI), SigFile(SigFile), IndexTargets(IndexTargets),
MetaIndexParser(MetaIndexParser), AuthPass(false), IMSHit(false)
{
DestFile = _config->FindDir("Dir::State::lists") + "partial/";
Desc.ShortDesc = ShortDesc;
Desc.URI = URI;
+ // we expect more items
+ ExpectedAdditionalItems = IndexTargets->size();
+
QueueURI(Desc);
}
/*}}}*/
// pkgAcqMetaIndex::Custom600Headers - Insert custom request headers /*{{{*/
// ---------------------------------------------------------------------
/* The only header we use is the last-modified header. */
-string pkgAcqMetaIndex::Custom600Headers()
+string pkgAcqMetaIndex::Custom600Headers() const
{
string Final = _config->FindDir("Dir::State::lists");
Final += URItoFileName(RealURI);
return "\nIndex-File: true\nLast-Modified: " + TimeRFC1123(Buf.st_mtime);
}
/*}}}*/
-void pkgAcqMetaIndex::Done(string Message,unsigned long long Size,string Hash, /*{{{*/
+void pkgAcqMetaIndex::Done(string Message,unsigned long long Size,HashStringList const &Hashes, /*{{{*/
pkgAcquire::MethodConfig *Cfg)
{
- Item::Done(Message,Size,Hash,Cfg);
+ Item::Done(Message,Size,Hashes,Cfg);
// MetaIndexes are done in two passes: one to download the
// metaindex with an appropriate method, and a second to verify it
}
else
{
+ // FIXME: move this into pkgAcqMetaClearSig::Done on the next
+ // ABI break
+
+ // if we expect a ClearTextSignature (InRelease), ensure that this is
+ // what we got and, if not, fail so that a Release/Release.gpg pair
+ // is queued instead, see #346386
+ if (SigFile == DestFile && !StartsWithGPGClearTextSignature(DestFile))
+ {
+ Failed(Message, Cfg);
+ return;
+ }
+
// There was a signature file, so pass it to gpgv for
// verification
-
if (_config->FindB("Debug::pkgAcquire::Auth", false))
std::cerr << "Metaindex acquired, queueing gpg verification ("
<< SigFile << "," << DestFile << ")\n";
AuthPass = true;
Desc.URI = "gpgv:" + SigFile;
QueueURI(Desc);
- Mode = "gpgv";
+ ActiveSubprocess = "gpgv";
+#if __GNUC__ >= 4
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+#endif
+ Mode = "gpgv";
+#if __GNUC__ >= 4
+ #pragma GCC diagnostic pop
+#endif
return;
}
}
std::cerr << "Signature verification succeeded: "
<< DestFile << std::endl;
+ // do not trust any previously unverified content that we may have
+ string LastGoodSigFile = _config->FindDir("Dir::State::lists").append("partial/").append(URItoFileName(RealURI));
+ if (DestFile != SigFile)
+ LastGoodSigFile.append(".gpg");
+ LastGoodSigFile.append(".reverify");
+ if(IMSHit == false && RealFileExists(LastGoodSigFile) == false)
+ {
+ for (vector <IndexTarget*>::const_iterator Target = IndexTargets->begin();
+ Target != IndexTargets->end();
+ ++Target)
+ {
+ // remove old indexes
+ std::string index = _config->FindDir("Dir::State::lists") +
+ URItoFileName((*Target)->URI);
+ unlink(index.c_str());
+ // and also old gzipindexes
+ std::vector<std::string> types = APT::Configuration::getCompressionTypes();
+ for (std::vector<std::string>::const_iterator t = types.begin(); t != types.end(); ++t)
+ {
+ // build the name from the base each time; appending to 'index' itself
+ // would accumulate extensions across loop iterations
+ std::string const compressed = index + '.' + (*t);
+ unlink(compressed.c_str());
+ }
+ }
+ }
+
+
// Download further indexes with verification
QueueIndexes(true);
}
}
- for (vector <struct IndexTarget*>::const_iterator Target = IndexTargets->begin();
+ // at this point the real Items are loaded in the fetcher
+ ExpectedAdditionalItems = 0;
+ for (vector <IndexTarget*>::const_iterator Target = IndexTargets->begin();
Target != IndexTargets->end();
++Target)
{
- HashString ExpectedIndexHash;
+ HashStringList ExpectedIndexHashes;
const indexRecords::checkSum *Record = MetaIndexParser->Lookup((*Target)->MetaKey);
bool compressedAvailable = false;
if (Record == NULL)
}
else
{
- ExpectedIndexHash = Record->Hash;
+ ExpectedIndexHashes = Record->Hashes;
if (_config->FindB("Debug::pkgAcquire::Auth", false))
{
- std::cerr << "Queueing: " << (*Target)->URI << std::endl;
- std::cerr << "Expected Hash: " << ExpectedIndexHash.toStr() << std::endl;
+ std::cerr << "Queueing: " << (*Target)->URI << std::endl
+ << "Expected Hash:" << std::endl;
+ for (HashStringList::const_iterator hs = ExpectedIndexHashes.begin(); hs != ExpectedIndexHashes.end(); ++hs)
+ std::cerr << "\t- " << hs->toStr() << std::endl;
std::cerr << "For: " << Record->MetaKeyFilename << std::endl;
}
- if (verify == true && ExpectedIndexHash.empty() == true && (*Target)->IsOptional() == false)
+ if (verify == true && ExpectedIndexHashes.empty() == true && (*Target)->IsOptional() == false)
{
Status = StatAuthError;
strprintf(ErrorText, _("Unable to find hash sum for '%s' in Release file"), (*Target)->MetaKey.c_str());
{
if ((*Target)->IsSubIndex() == true)
new pkgAcqSubIndex(Owner, (*Target)->URI, (*Target)->Description,
- (*Target)->ShortDesc, ExpectedIndexHash);
+ (*Target)->ShortDesc, ExpectedIndexHashes);
else if (transInRelease == false || Record != NULL || compressedAvailable == true)
{
if (_config->FindB("Acquire::PDiffs",true) == true && transInRelease == true &&
MetaIndexParser->Exists((*Target)->MetaKey + ".diff/Index") == true)
- new pkgAcqDiffIndex(Owner, (*Target)->URI, (*Target)->Description,
- (*Target)->ShortDesc, ExpectedIndexHash);
+ new pkgAcqDiffIndex(Owner, *Target, ExpectedIndexHashes, MetaIndexParser);
else
- new pkgAcqIndexTrans(Owner, *Target, ExpectedIndexHash, MetaIndexParser);
+ new pkgAcqIndexTrans(Owner, *Target, ExpectedIndexHashes, MetaIndexParser);
}
continue;
}
 instead, but passing the required info to it is too much hassle */
if(_config->FindB("Acquire::PDiffs",true) == true && (verify == false ||
MetaIndexParser->Exists((*Target)->MetaKey + ".diff/Index") == true))
- new pkgAcqDiffIndex(Owner, (*Target)->URI, (*Target)->Description,
- (*Target)->ShortDesc, ExpectedIndexHash);
+ new pkgAcqDiffIndex(Owner, *Target, ExpectedIndexHashes, MetaIndexParser);
else
- new pkgAcqIndex(Owner, *Target, ExpectedIndexHash, MetaIndexParser);
+ new pkgAcqIndex(Owner, *Target, ExpectedIndexHashes, MetaIndexParser);
}
}
/*}}}*/
string const &URI, string const &URIDesc, string const &ShortDesc,
string const &MetaIndexURI, string const &MetaIndexURIDesc, string const &MetaIndexShortDesc,
string const &MetaSigURI, string const &MetaSigURIDesc, string const &MetaSigShortDesc,
- const vector<struct IndexTarget*>* IndexTargets,
+ const vector<IndexTarget*>* IndexTargets,
indexRecords* MetaIndexParser) :
pkgAcqMetaIndex(Owner, URI, URIDesc, ShortDesc, "", IndexTargets, MetaIndexParser),
MetaIndexURI(MetaIndexURI), MetaIndexURIDesc(MetaIndexURIDesc), MetaIndexShortDesc(MetaIndexShortDesc),
{
SigFile = DestFile;
+ // index targets + (worst case:) Release/Release.gpg
+ ExpectedAdditionalItems = IndexTargets->size() + 2;
+
 // keep the old InRelease around in case of transient network errors
string const Final = _config->FindDir("Dir::State::lists") + URItoFileName(RealURI);
if (RealFileExists(Final) == true)
// pkgAcqMetaClearSig::Custom600Headers - Insert custom request headers /*{{{*/
// ---------------------------------------------------------------------
// FIXME: this can go away once the InRelease file is used widely
-string pkgAcqMetaClearSig::Custom600Headers()
+string pkgAcqMetaClearSig::Custom600Headers() const
{
string Final = _config->FindDir("Dir::State::lists");
Final += URItoFileName(RealURI);
/*}}}*/
void pkgAcqMetaClearSig::Failed(string Message,pkgAcquire::MethodConfig *Cnf) /*{{{*/
{
+ // we failed, we will not get additional items from this method
+ ExpectedAdditionalItems = 0;
+
if (AuthPass == false)
{
// Remove the 'old' InRelease file if we try Release.gpg now as otherwise
pkgAcqArchive::pkgAcqArchive(pkgAcquire *Owner,pkgSourceList *Sources,
pkgRecords *Recs,pkgCache::VerIterator const &Version,
string &StoreFilename) :
- Item(Owner), Version(Version), Sources(Sources), Recs(Recs),
+ Item(Owner, HashStringList()), Version(Version), Sources(Sources), Recs(Recs),
StoreFilename(StoreFilename), Vf(Version.FileList()),
Trusted(false)
{
checking later. */
bool pkgAcqArchive::QueueNext()
{
- string const ForceHash = _config->Find("Acquire::ForceHash");
for (; Vf.end() == false; ++Vf)
{
// Ignore not source sources
pkgRecords::Parser &Parse = Recs->Lookup(Vf);
if (_error->PendingError() == true)
return false;
-
+
string PkgFile = Parse.FileName();
- if (ForceHash.empty() == false)
- {
- if(stringcasecmp(ForceHash, "sha512") == 0)
- ExpectedHash = HashString("SHA512", Parse.SHA512Hash());
- else if(stringcasecmp(ForceHash, "sha256") == 0)
- ExpectedHash = HashString("SHA256", Parse.SHA256Hash());
- else if (stringcasecmp(ForceHash, "sha1") == 0)
- ExpectedHash = HashString("SHA1", Parse.SHA1Hash());
- else
- ExpectedHash = HashString("MD5Sum", Parse.MD5Hash());
- }
- else
- {
- string Hash;
- if ((Hash = Parse.SHA512Hash()).empty() == false)
- ExpectedHash = HashString("SHA512", Hash);
- else if ((Hash = Parse.SHA256Hash()).empty() == false)
- ExpectedHash = HashString("SHA256", Hash);
- else if ((Hash = Parse.SHA1Hash()).empty() == false)
- ExpectedHash = HashString("SHA1", Hash);
- else
- ExpectedHash = HashString("MD5Sum", Parse.MD5Hash());
- }
+ ExpectedHashes = Parse.Hashes();
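+ // the records parser now supplies every hash it knows for this version;
+ // hash selection (e.g. via Acquire::ForceHash) happens inside
+ // HashStringList itself instead of being open-coded here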
+
if (PkgFile.empty() == true)
return _error->Error(_("The package index files are corrupted. No Filename: "
"field for package %s."),
// AcqArchive::Done - Finished fetching /*{{{*/
// ---------------------------------------------------------------------
/* */
-void pkgAcqArchive::Done(string Message,unsigned long long Size,string CalcHash,
+void pkgAcqArchive::Done(string Message,unsigned long long Size, HashStringList const &CalcHashes,
pkgAcquire::MethodConfig *Cfg)
{
- Item::Done(Message,Size,CalcHash,Cfg);
+ Item::Done(Message, Size, CalcHashes, Cfg);
// Check the size
if (Size != Version->Size)
RenameOnError(SizeMismatch);
return;
}
-
- // Check the hash
- if(ExpectedHash.toStr() != CalcHash)
+
+ // FIXME: could this empty() check impose *any* sort of security issue?
+ if(ExpectedHashes.usable() && ExpectedHashes != CalcHashes)
{
RenameOnError(HashSumMismatch);
+ printHashSumComparison(DestFile, ExpectedHashes, CalcHashes);
return;
}
/*}}}*/
// AcqArchive::IsTrusted - Determine whether this archive comes from a trusted source /*{{{*/
// ---------------------------------------------------------------------
-APT_PURE bool pkgAcqArchive::IsTrusted()
+APT_PURE bool pkgAcqArchive::IsTrusted() const
{
return Trusted;
}
// AcqFile::pkgAcqFile - Constructor /*{{{*/
// ---------------------------------------------------------------------
/* The file is added to the queue */
-pkgAcqFile::pkgAcqFile(pkgAcquire *Owner,string URI,string Hash,
+pkgAcqFile::pkgAcqFile(pkgAcquire *Owner,string URI, HashStringList const &Hashes,
unsigned long long Size,string Dsc,string ShortDesc,
const string &DestDir, const string &DestFilename,
bool IsIndexFile) :
- Item(Owner), ExpectedHash(Hash), IsIndexFile(IsIndexFile)
+ Item(Owner, Hashes), IsIndexFile(IsIndexFile)
{
Retries = _config->FindI("Acquire::Retries",0);
// AcqFile::Done - Item downloaded OK /*{{{*/
// ---------------------------------------------------------------------
/* */
-void pkgAcqFile::Done(string Message,unsigned long long Size,string CalcHash,
+void pkgAcqFile::Done(string Message,unsigned long long Size,HashStringList const &CalcHashes,
pkgAcquire::MethodConfig *Cnf)
{
- Item::Done(Message,Size,CalcHash,Cnf);
+ Item::Done(Message,Size,CalcHashes,Cnf);
// Check the hash
- if(!ExpectedHash.empty() && ExpectedHash.toStr() != CalcHash)
+ if(ExpectedHashes.usable() && ExpectedHashes != CalcHashes)
{
RenameOnError(HashSumMismatch);
+ printHashSumComparison(DestFile, ExpectedHashes, CalcHashes);
return;
}
// AcqIndex::Custom600Headers - Insert custom request headers /*{{{*/
// ---------------------------------------------------------------------
/* The only header we use is the last-modified header. */
-string pkgAcqFile::Custom600Headers()
+string pkgAcqFile::Custom600Headers() const
{
if (IsIndexFile)
return "\nIndex-File: true";