if(TF.Step(Tags) == true)
{
- string local_sha1;
bool found = false;
DiffInfo d;
string size;
- string tmp = Tags.FindS("SHA1-Current");
+ string const tmp = Tags.FindS("SHA1-Current");
std::stringstream ss(tmp);
- ss >> ServerSha1;
+ ss >> ServerSha1 >> size;
+ unsigned long const ServerSize = atol(size.c_str());
FileFd fd(CurrentPackagesFile, FileFd::ReadOnly);
SHA1Summation SHA1;
SHA1.AddFD(fd.Fd(), fd.Size());
- local_sha1 = string(SHA1.Result());
+ string const local_sha1 = SHA1.Result();
if(local_sha1 == ServerSha1)
{
std::clog << "SHA1-Current: " << ServerSha1 << std::endl;
// check the history and see what patches we need
- string history = Tags.FindS("SHA1-History");
+ string const history = Tags.FindS("SHA1-History");
std::stringstream hist(history);
while(hist >> d.sha1 >> size >> d.file)
{
- d.size = atoi(size.c_str());
// read until the first match is found
+ // from that point on, we probably need all diffs
if(d.sha1 == local_sha1)
found=true;
- // from that point on, we probably need all diffs
- if(found)
+ else if (found == false)
+ continue;
+
+ if(Debug)
+ std::clog << "Need to get diff: " << d.file << std::endl;
+ available_patches.push_back(d);
+ }
+
+ if (available_patches.empty() == false)
+ {
+ // patching with too many files is rather slow compared to a fast download
+ unsigned long const fileLimit = _config->FindI("Acquire::PDiffs::FileLimit", 0);
+ if (fileLimit != 0 && fileLimit < available_patches.size())
+ {
+ if (Debug)
+ std::clog << "Need " << available_patches.size() << " diffs (Limit is " << fileLimit
+ << ") so fallback to complete download" << std::endl;
+ return false;
+ }
+
+ // see if the patches are too big
+ found = false; // it was true and it will be true again at the end
+ d = *available_patches.begin();
+ string const firstPatch = d.file;
+ unsigned long patchesSize = 0;
+ std::stringstream patches(Tags.FindS("SHA1-Patches"));
+ while(patches >> d.sha1 >> size >> d.file)
+ {
+ if (firstPatch == d.file)
+ found = true;
+ else if (found == false)
+ continue;
+
+ patchesSize += atol(size.c_str());
+ }
+ unsigned long const sizeLimit = ServerSize * _config->FindI("Acquire::PDiffs::SizeLimit", 100);
+ if (sizeLimit > 0 && (sizeLimit/100) < patchesSize)
{
- if(Debug)
- std::clog << "Need to get diff: " << d.file << std::endl;
- available_patches.push_back(d);
+ if (Debug)
+ std::clog << "Need " << patchesSize << " bytes (Limit is " << sizeLimit/100
+ << ") so fallback to complete download" << std::endl;
+ return false;
}
}
}
if(found)
{
// queue the diffs
- string::size_type last_space = Description.rfind(" ");
+ string::size_type const last_space = Description.rfind(" ");
if(last_space != string::npos)
Description.erase(last_space, Description.size()-last_space);
new pkgAcqIndexDiffs(Owner, RealURI, Description, Desc.ShortDesc,
- ExpectedHash, available_patches);
+ ExpectedHash, ServerSha1, available_patches);
Complete = false;
Status = StatDone;
Dequeue();
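For reference, the stanza this parser walks is the Packages.diff/Index file published next to the full index; its shape is roughly the following (hashes shortened and invented for the sketch):

   SHA1-Current: 31465f1de06b 15040458
   SHA1-History:
    8a17ef6a5c39 14457241 2010-08-16-0802.05
    3dc186f4d79c 14572108 2010-08-17-0802.02
   SHA1-Patches:
    5da45b8c57b2 11570 2010-08-16-0802.05
    a09b7fcd2c3f 13289 2010-08-17-0802.02

The second token on the SHA1-Current line is the size of the fully patched file; that is the value the hunk above now reads into ServerSize for the size-limit check.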
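Both new limits are driven by configuration options; a minimal apt.conf sketch with invented values (the code above defaults to 0, i.e. no file limit, and to 100, i.e. patches may total the size of the complete file):

   // fall back to a complete download if more than 20 diffs would be needed
   Acquire::PDiffs::FileLimit "20";
   // fall back if the diffs sum to more than 50% of the full file's size
   Acquire::PDiffs::SizeLimit "50";

SizeLimit is a percentage of ServerSize, which is why the code multiplies first and divides by 100 only at comparison time.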
pkgAcqIndexDiffs::pkgAcqIndexDiffs(pkgAcquire *Owner,
string URI,string URIDesc,string ShortDesc,
HashString ExpectedHash,
+ string ServerSha1,
vector<DiffInfo> diffs)
: Item(Owner), RealURI(URI), ExpectedHash(ExpectedHash),
- available_patches(diffs)
+ available_patches(diffs), ServerSha1(ServerSha1)
{
DestFile = _config->FindDir("Dir::State::lists") + "partial/";
std::clog << "QueueNextDiff: "
<< FinalFile << " (" << local_sha1 << ")" << std::endl;
+ // final file reached before all patches are applied
+ if(local_sha1 == ServerSha1)
+ {
+ Finish(true);
+ return true;
+ }
+
// remove all patches until the next matching patch is found
// this requires the Index file to be ordered
for(vector<DiffInfo>::iterator I=available_patches.begin();
// see if there is more to download
if(available_patches.size() > 0) {
new pkgAcqIndexDiffs(Owner, RealURI, Description, Desc.ShortDesc,
- ExpectedHash, available_patches);
+ ExpectedHash, ServerSha1, available_patches);
return Finish();
} else
return Finish(true);
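The early Finish(true) above hinges on hashing the file currently on disk and comparing it against the stored ServerSha1. A standalone sketch of that check, assuming the same apt-pkg helpers the hunks above use (FileFd, SHA1Summation) and that Result() returns the hex digest as a string, as ParseDiffIndex relies on:

   #include <apt-pkg/fileutl.h>
   #include <apt-pkg/sha1.h>
   #include <iostream>
   #include <string>

   // hypothetical driver: argv[1] is the index file on disk,
   // argv[2] the SHA1-Current value announced by the server
   int main(int argc, char **argv)
   {
      if (argc < 3)
         return 2;
      FileFd fd(argv[1], FileFd::ReadOnly);
      SHA1Summation SHA1;
      SHA1.AddFD(fd.Fd(), fd.Size());
      std::string const local_sha1 = SHA1.Result();
      std::cout << (local_sha1 == argv[2] ? "up-to-date" : "patches needed")
                << std::endl;
      return 0;
   }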
if(Desc.URI.substr(nameLen) != *t)
continue;
- // we want to try it with the next extension
+ // we want to try it with the next extension (and make sure to
+ // not skip over the end)
t++;
+ if (t == types.end())
+ break;
- if (t != types.end())
- {
- Desc.URI = Desc.URI.substr(0, nameLen) + *t;
-
- new pkgAcqIndex(Owner, RealURI, Desc.Description, Desc.ShortDesc,
- ExpectedHash, string(".").append(*t));
-
- Status = StatDone;
- Complete = false;
- Dequeue();
- return;
- }
+ // queue new download
+ Desc.URI = Desc.URI.substr(0, nameLen) + *t;
+ new pkgAcqIndex(Owner, RealURI, Desc.Description, Desc.ShortDesc,
+ ExpectedHash, string(".").append(*t));
+
+ Status = StatDone;
+ Complete = false;
+ Dequeue();
+ return;
}
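The restructured loop above advances the iterator first and only then tests for the end, so the early break is what keeps the later dereference safe. The guard-after-increment pattern in isolation (container and values invented for the sketch):

   #include <iostream>
   #include <string>
   #include <vector>

   int main()
   {
      std::vector<std::string> types;
      types.push_back("bz2");
      types.push_back("gz");
      types.push_back("lzma");

      std::vector<std::string>::const_iterator t = types.begin();
      ++t;                    // move on to the next candidate extension
      if (t == types.end())   // bail out instead of dereferencing end()
      {
         std::cout << "no compression type left to try" << std::endl;
         return 1;
      }
      std::cout << "next try: ." << *t << std::endl;
      return 0;
   }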
// on decompression failure, remove bad versions in partial/
the archive is already available in the cache and stashes the MD5 for
checking later. */
bool pkgAcqArchive::QueueNext()
{
+ string const ForceHash = _config->Find("Acquire::ForceHash");
for (; Vf.end() == false; Vf++)
{
// Ignore not source sources
return false;
string PkgFile = Parse.FileName();
- if(Parse.SHA256Hash() != "")
- ExpectedHash = HashString("SHA256", Parse.SHA256Hash());
- else if (Parse.SHA1Hash() != "")
- ExpectedHash = HashString("SHA1", Parse.SHA1Hash());
- else
- ExpectedHash = HashString("MD5Sum", Parse.MD5Hash());
+ if (ForceHash.empty() == false)
+ {
+ if(stringcasecmp(ForceHash, "sha256") == 0)
+ ExpectedHash = HashString("SHA256", Parse.SHA256Hash());
+ else if (stringcasecmp(ForceHash, "sha1") == 0)
+ ExpectedHash = HashString("SHA1", Parse.SHA1Hash());
+ else
+ ExpectedHash = HashString("MD5Sum", Parse.MD5Hash());
+ }
+ else
+ {
+ string Hash;
+ if ((Hash = Parse.SHA256Hash()).empty() == false)
+ ExpectedHash = HashString("SHA256", Hash);
+ else if ((Hash = Parse.SHA1Hash()).empty() == false)
+ ExpectedHash = HashString("SHA1", Hash);
+ else
+ ExpectedHash = HashString("MD5Sum", Parse.MD5Hash());
+ }
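The new option gives a single switch to pin which checksum is used for verification; in apt.conf terms (a sketch, values as the code above expects them):

   // verify archives against SHA256 only; "sha1" selects SHA1, and any
   // other value falls back to MD5Sum in the code above
   Acquire::ForceHash "sha256";

Without the option set, the strongest-available selection in the else branch applies as before.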
if (PkgFile.empty() == true)
return _error->Error(_("The package index files are corrupted. No Filename: "
"field for package %s."),