// Include Files /*{{{*/
#include <apt-pkg/acquire-item.h>
#include <apt-pkg/configuration.h>
+#include <apt-pkg/aptconfiguration.h>
#include <apt-pkg/sourcelist.h>
#include <apt-pkg/vendorlist.h>
#include <apt-pkg/error.h>
if(TF.Step(Tags) == true)
{
- string local_sha1;
bool found = false;
DiffInfo d;
string size;
- string tmp = Tags.FindS("SHA1-Current");
+ string const tmp = Tags.FindS("SHA1-Current");
std::stringstream ss(tmp);
- ss >> ServerSha1;
+ ss >> ServerSha1 >> size;
+ unsigned long const ServerSize = atol(size.c_str());
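+ // SHA1-Current carries "<sha1> <size>" of the Packages file the patch
+ // series leads up to; the hash is compared below and the size feeds the
+ // SizeLimit check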
FileFd fd(CurrentPackagesFile, FileFd::ReadOnly);
SHA1Summation SHA1;
SHA1.AddFD(fd.Fd(), fd.Size());
- local_sha1 = string(SHA1.Result());
+ string const local_sha1 = SHA1.Result();
if(local_sha1 == ServerSha1)
{
std::clog << "SHA1-Current: " << ServerSha1 << std::endl;
// check the history and see what patches we need
- string history = Tags.FindS("SHA1-History");
+ string const history = Tags.FindS("SHA1-History");
std::stringstream hist(history);
- while(hist >> d.sha1 >> size >> d.file)
+ while(hist >> d.sha1 >> size >> d.file)
{
- d.size = atoi(size.c_str());
// read until the first match is found
+ // from that point on, we probably need all diffs
if(d.sha1 == local_sha1)
found=true;
- // from that point on, we probably need all diffs
- if(found)
+ else if (found == false)
+ continue;
+
+ if(Debug)
+ std::clog << "Need to get diff: " << d.file << std::endl;
+ available_patches.push_back(d);
+ }
+
+ if (available_patches.empty() == false)
+ {
+ // applying too many patches is often slower than a single complete download
+ unsigned long const fileLimit = _config->FindI("Acquire::PDiffs::FileLimit", 0);
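+ // a FileLimit of 0 (the default) disables this check entirely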
+ if (fileLimit != 0 && fileLimit < available_patches.size())
+ {
+ if (Debug)
+ std::clog << "Need " << available_patches.size() << " diffs (Limit is " << fileLimit
+ << ") so fallback to complete download" << std::endl;
+ return false;
+ }
+
+ // see if the patches are too big
+ found = false; // reset here; it is set true again below once the first needed patch is seen
+ d = *available_patches.begin();
+ string const firstPatch = d.file;
+ unsigned long patchesSize = 0;
+ std::stringstream patches(Tags.FindS("SHA1-Patches"));
+ while(patches >> d.sha1 >> size >> d.file)
+ {
+ if (firstPatch == d.file)
+ found = true;
+ else if (found == false)
+ continue;
+
+ patchesSize += atol(size.c_str());
+ }
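+ // Acquire::PDiffs::SizeLimit is read as a percentage of the target file
+ // size (default 100), so with e.g. a 10 MB Packages file and the default
+ // value, patches summing to more than 10 MB force a complete download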
+ unsigned long const sizeLimit = ServerSize * _config->FindI("Acquire::PDiffs::SizeLimit", 100);
+ if (sizeLimit > 0 && (sizeLimit/100) < patchesSize)
{
- if(Debug)
- std::clog << "Need to get diff: " << d.file << std::endl;
- available_patches.push_back(d);
+ if (Debug)
+ std::clog << "Need " << patchesSize << " bytes (Limit is " << sizeLimit/100
+ << ") so fallback to complete download" << std::endl;
+ return false;
}
}
}
if(found)
{
// queue the diffs
- string::size_type last_space = Description.rfind(" ");
+ string::size_type const last_space = Description.rfind(" ");
if(last_space != string::npos)
Description.erase(last_space, Description.size()-last_space);
new pkgAcqIndexDiffs(Owner, RealURI, Description, Desc.ShortDesc,
- ExpectedHash, available_patches);
+ ExpectedHash, ServerSha1, available_patches);
Complete = false;
Status = StatDone;
Dequeue();
pkgAcqIndexDiffs::pkgAcqIndexDiffs(pkgAcquire *Owner,
string URI,string URIDesc,string ShortDesc,
HashString ExpectedHash,
+ string ServerSha1,
vector<DiffInfo> diffs)
: Item(Owner), RealURI(URI), ExpectedHash(ExpectedHash),
- available_patches(diffs)
+ available_patches(diffs), ServerSha1(ServerSha1)
{
DestFile = _config->FindDir("Dir::State::lists") + "partial/";
std::clog << "QueueNextDiff: "
<< FinalFile << " (" << local_sha1 << ")"<<std::endl;
+ // the final file has already been reached before applying all patches
+ if(local_sha1 == ServerSha1)
+ {
+ Finish(true);
+ return true;
+ }
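+ // (this early exit is the reason ServerSha1 is now passed to pkgAcqIndexDiffs)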
+
// remove all patches until the next matching patch is found
// this requires the Index file to be ordered
for(vector<DiffInfo>::iterator I=available_patches.begin();
// see if there is more to download
if(available_patches.size() > 0) {
new pkgAcqIndexDiffs(Owner, RealURI, Description, Desc.ShortDesc,
- ExpectedHash, available_patches);
+ ExpectedHash, ServerSha1, available_patches);
return Finish();
} else
return Finish(true);
if(comprExt.empty())
{
// autoselect the compression method
- Configuration::Item const *Opts = _config->Tree("Acquire::CompressionTypes");
- if (Opts != 0)
- Opts = Opts->Child;
-
- const char dirBin[] = "Dir::Bin::";
- for (; Opts != 0; Opts = Opts->Next)
- {
- if (Opts->Tag.empty() == true || Opts->Value.empty() == true)
- continue;
- const string bin = _config->FindFile(string(dirBin).append(Opts->Value).c_str(),"");
- if (bin != "" && !FileExists(bin))
- continue;
- comprExt = '.' + Opts->Tag;
- break;
- }
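+ // getCompressionTypes() is expected to return the configured types in
+ // order of preference, with types lacking their Dir::Bin::* binary already
+ // filtered out, replacing the manual lookup removed above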
+ std::vector<std::string> types = APT::Configuration::getCompressionTypes();
+ if (types.empty() == true)
+ comprExt = "plain";
+ else
+ comprExt = "." + types[0];
}
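+ // "plain" and "." both mean: request the index without a compression extension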
CompressionExtension = ((comprExt == "plain" || comprExt == ".") ? "" : comprExt);
/*}}}*/
void pkgAcqIndex::Failed(string Message,pkgAcquire::MethodConfig *Cnf) /*{{{*/
{
- Configuration::Item const *Opts = _config->Tree("Acquire::CompressionTypes");
- if (Opts != 0)
- Opts = Opts->Child;
+ std::vector<std::string> types = APT::Configuration::getCompressionTypes();
- const char dirBin[] = "Dir::Bin::";
- for (; Opts != 0; Opts = Opts->Next)
+ for (std::vector<std::string>::const_iterator t = types.begin();
+ t != types.end(); t++)
{
- if (Opts->Tag.empty() == true || Opts->Value.empty() == true)
+ // jump over all already tried compression types
+ const unsigned int nameLen = Desc.URI.size() - (*t).size();
+ if(Desc.URI.substr(nameLen) != *t)
continue;
- // jump over all already checked compression types
- const unsigned int nameLen = Desc.URI.size() - Opts->Tag.size();
- if(Desc.URI.substr(nameLen) != Opts->Tag || Opts->Next == 0)
- continue;
-
- // check if we need an external binary for this compression type
- const string bin = _config->FindFile(string(dirBin).append(Opts->Next->Value).c_str(),"");
- if (bin != "" && !FileExists(bin))
- continue;
-
- // retry with the next extension
- Desc.URI = Desc.URI.substr(0, nameLen) + Opts->Next->Tag;
+ // retry with the next extension in the list (and make sure
+ // not to run past the end)
+ t++;
+ if (t == types.end())
+ break;
+ // queue new download
+ Desc.URI = Desc.URI.substr(0, nameLen) + *t;
new pkgAcqIndex(Owner, RealURI, Desc.Description, Desc.ShortDesc,
- ExpectedHash, string(".").append(Opts->Next->Tag));
-
+ ExpectedHash, string(".").append(*t));
+
Status = StatDone;
Complete = false;
Dequeue();