X-Git-Url: https://git.saurik.com/apt.git/blobdiff_plain/30b683f4f3021cd191ffef04bfaf2deb65820a52..0977f81757f42889a9edb761061529041afe0a06:/apt-pkg/acquire-item.cc diff --git a/apt-pkg/acquire-item.cc b/apt-pkg/acquire-item.cc index eee1097e9..5187738e9 100644 --- a/apt-pkg/acquire-item.cc +++ b/apt-pkg/acquire-item.cc @@ -44,6 +44,9 @@ #include #include #include +#include +#include +#include #include /*}}}*/ @@ -62,18 +65,51 @@ static void printHashSumComparision(std::string const &URI, HashStringList const std::cerr << "\t- " << hs->toStr() << std::endl; } /*}}}*/ +static void ChangeOwnerAndPermissionOfFile(char const * const requester, char const * const file, char const * const user, char const * const group, mode_t const mode) +{ + // ensure the file is owned by root and has good permissions + struct passwd const * const pw = getpwnam(user); + struct group const * const gr = getgrnam(group); + if (getuid() == 0) // if we aren't root, we can't chown, so don't try it + { + if (pw != NULL && gr != NULL && chown(file, pw->pw_uid, gr->gr_gid) != 0) + _error->WarningE(requester, "chown to %s:%s of file %s failed", user, group, file); + } + if (chmod(file, mode) != 0) + _error->WarningE(requester, "chmod 0%o of file %s failed", mode, file); +} +static std::string GetPartialFileName(std::string const &file) +{ + std::string DestFile = _config->FindDir("Dir::State::lists") + "partial/"; + DestFile += file; + return DestFile; +} +static std::string GetPartialFileNameFromURI(std::string const &uri) +{ + return GetPartialFileName(URItoFileName(uri)); +} + // Acquire::Item::Item - Constructor /*{{{*/ +#if __GNUC__ >= 4 + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wdeprecated-declarations" +#endif pkgAcquire::Item::Item(pkgAcquire *Owner, HashStringList const &ExpectedHashes, - unsigned long TransactionID) + pkgAcqMetaBase *TransactionManager) : Owner(Owner), FileSize(0), PartialSize(0), Mode(0), ID(0), Complete(false), - Local(false), QueueCounter(0), TransactionID(TransactionID), + Local(false), QueueCounter(0), TransactionManager(TransactionManager), ExpectedAdditionalItems(0), ExpectedHashes(ExpectedHashes) { Owner->Add(this); Status = StatIdle; + if(TransactionManager != NULL) + TransactionManager->Add(this); } +#if __GNUC__ >= 4 + #pragma GCC diagnostic pop +#endif /*}}}*/ // Acquire::Item::~Item - Destructor /*{{{*/ // --------------------------------------------------------------------- @@ -89,7 +125,6 @@ pkgAcquire::Item::~Item() fetch this object */ void pkgAcquire::Item::Failed(string Message,pkgAcquire::MethodConfig *Cnf) { - Status = StatIdle; if(ErrorText == "") ErrorText = LookupTag(Message,"Message"); UsedMirror = LookupTag(Message,"UsedMirror"); @@ -98,7 +133,7 @@ void pkgAcquire::Item::Failed(string Message,pkgAcquire::MethodConfig *Cnf) /* This indicates that the file is not available right now but might be sometime later. 
If we do a retry cycle then this should be retried [CDROMs] */ - if (Cnf->LocalOnly == true && + if (Cnf != NULL && Cnf->LocalOnly == true && StringToBool(LookupTag(Message,"Transient-Failure"),false) == true) { Status = StatIdle; @@ -107,11 +142,18 @@ void pkgAcquire::Item::Failed(string Message,pkgAcquire::MethodConfig *Cnf) } Status = StatError; + Complete = false; Dequeue(); - } + } + else + Status = StatIdle; - // report mirror failure back to LP if we actually use a mirror + // check fail reason string FailReason = LookupTag(Message, "FailReason"); + if(FailReason == "MaximumSizeExceeded") + Rename(DestFile, DestFile+".FAILED"); + + // report mirror failure back to LP if we actually use a mirror if(FailReason.size() != 0) ReportMirrorFailure(FailReason); else @@ -155,7 +197,7 @@ void pkgAcquire::Item::Done(string Message,unsigned long long Size,HashStringLis // --------------------------------------------------------------------- /* This helper function is used by a lot of item methods as their final step */ -void pkgAcquire::Item::Rename(string From,string To) +bool pkgAcquire::Item::Rename(string From,string To) { if (rename(From.c_str(),To.c_str()) != 0) { @@ -163,10 +205,28 @@ void pkgAcquire::Item::Rename(string From,string To) snprintf(S,sizeof(S),_("rename failed, %s (%s -> %s)."),strerror(errno), From.c_str(),To.c_str()); Status = StatError; - ErrorText = S; + ErrorText += S; + return false; } + return true; } /*}}}*/ + +void pkgAcquire::Item::QueueURI(ItemDesc &Item) +{ + if (RealFileExists(DestFile)) + { + std::string SandboxUser = _config->Find("APT::Sandbox::User"); + ChangeOwnerAndPermissionOfFile("GetPartialFileName", DestFile.c_str(), + SandboxUser.c_str(), "root", 0600); + } + Owner->Enqueue(Item); +} +void pkgAcquire::Item::Dequeue() +{ + Owner->Dequeue(this); +} + bool pkgAcquire::Item::RenameOnError(pkgAcquire::Item::RenameOnErrorState const error)/*{{{*/ { if(FileExists(DestFile)) @@ -189,8 +249,29 @@ bool pkgAcquire::Item::RenameOnError(pkgAcquire::Item::RenameOnErrorState const Status = StatError; // do not report as usually its not the mirrors fault, but Portal/Proxy break; + case SignatureError: + ErrorText = _("Signature error"); + Status = StatError; + break; + case NotClearsigned: + ErrorText = _("Does not start with a cleartext signature"); + Status = StatError; + break; } return false; +} + /*}}}*/ +void pkgAcquire::Item::SetActiveSubprocess(const std::string &subprocess)/*{{{*/ +{ + ActiveSubprocess = subprocess; +#if __GNUC__ >= 4 + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wdeprecated-declarations" +#endif + Mode = ActiveSubprocess.c_str(); +#if __GNUC__ >= 4 + #pragma GCC diagnostic pop +#endif } /*}}}*/ // Acquire::Item::ReportMirrorFailure /*{{{*/ @@ -237,120 +318,6 @@ void pkgAcquire::Item::ReportMirrorFailure(string FailCode) } } /*}}}*/ -// AcqSubIndex::AcqSubIndex - Constructor /*{{{*/ -// --------------------------------------------------------------------- -/* Get a sub-index file based on checksums from a 'master' file and - possibly query additional files */ -pkgAcqSubIndex::pkgAcqSubIndex(pkgAcquire *Owner, - unsigned long TransactionID, - string const &URI, - string const &URIDesc, string const &ShortDesc, - HashStringList const &ExpectedHashes) - : Item(Owner, ExpectedHashes, TransactionID) -{ - /* XXX: Beware: Currently this class does nothing (of value) anymore ! 
*/ - Debug = _config->FindB("Debug::pkgAcquire::SubIndex",false); - - DestFile = _config->FindDir("Dir::State::lists") + "partial/"; - DestFile += URItoFileName(URI); - - Desc.URI = URI; - Desc.Description = URIDesc; - Desc.Owner = this; - Desc.ShortDesc = ShortDesc; - - QueueURI(Desc); - - if(Debug) - std::clog << "pkgAcqSubIndex: " << Desc.URI << std::endl; -} - /*}}}*/ -// AcqSubIndex::Custom600Headers - Insert custom request headers /*{{{*/ -// --------------------------------------------------------------------- -/* The only header we use is the last-modified header. */ -string pkgAcqSubIndex::Custom600Headers() const -{ - string Final = _config->FindDir("Dir::State::lists"); - Final += URItoFileName(Desc.URI); - - struct stat Buf; - if (stat(Final.c_str(),&Buf) != 0) - return "\nIndex-File: true\nFail-Ignore: true\n"; - return "\nIndex-File: true\nFail-Ignore: true\nLast-Modified: " + TimeRFC1123(Buf.st_mtime); -} - /*}}}*/ -void pkgAcqSubIndex::Failed(string Message,pkgAcquire::MethodConfig * /*Cnf*/)/*{{{*/ -{ - if(Debug) - std::clog << "pkgAcqSubIndex failed: " << Desc.URI << " with " << Message << std::endl; - - Complete = false; - Status = StatDone; - Dequeue(); - - // No good Index is provided -} - /*}}}*/ -void pkgAcqSubIndex::Done(string Message,unsigned long long Size,HashStringList const &Hashes, /*{{{*/ - pkgAcquire::MethodConfig *Cnf) -{ - if(Debug) - std::clog << "pkgAcqSubIndex::Done(): " << Desc.URI << std::endl; - - string FileName = LookupTag(Message,"Filename"); - if (FileName.empty() == true) - { - Status = StatError; - ErrorText = "Method gave a blank filename"; - return; - } - - if (FileName != DestFile) - { - Local = true; - Desc.URI = "copy:" + FileName; - QueueURI(Desc); - return; - } - - Item::Done(Message, Size, Hashes, Cnf); - - string FinalFile = _config->FindDir("Dir::State::lists")+URItoFileName(Desc.URI); - - /* Downloaded invalid transindex => Error (LP: #346386) (Closes: #627642) */ - indexRecords SubIndexParser; - if (FileExists(DestFile) == true && !SubIndexParser.Load(DestFile)) { - Status = StatError; - ErrorText = SubIndexParser.ErrorText; - return; - } - - // success in downloading the index - // rename the index - if(Debug) - std::clog << "Renaming: " << DestFile << " -> " << FinalFile << std::endl; - Rename(DestFile,FinalFile); - chmod(FinalFile.c_str(),0644); - DestFile = FinalFile; - - if(ParseIndex(DestFile) == false) - return Failed("", NULL); - - Complete = true; - Status = StatDone; - Dequeue(); - return; -} - /*}}}*/ -bool pkgAcqSubIndex::ParseIndex(string const &IndexFile) /*{{{*/ -{ - indexRecords SubIndexParser; - if (FileExists(IndexFile) == false || SubIndexParser.Load(IndexFile) == false) - return false; - // so something with the downloaded index - return true; -} - /*}}}*/ // AcqDiffIndex::AcqDiffIndex - Constructor /*{{{*/ // --------------------------------------------------------------------- /* Get the DiffIndex file first and see if there are patches available @@ -359,24 +326,23 @@ bool pkgAcqSubIndex::ParseIndex(string const &IndexFile) /*{{{*/ * the original packages file */ pkgAcqDiffIndex::pkgAcqDiffIndex(pkgAcquire *Owner, - unsigned long TransactionID, + pkgAcqMetaBase *TransactionManager, IndexTarget const * const Target, HashStringList const &ExpectedHashes, indexRecords *MetaIndexParser) - : pkgAcqBaseIndex(Owner, TransactionID, Target, ExpectedHashes, - MetaIndexParser) + : pkgAcqBaseIndex(Owner, TransactionManager, Target, ExpectedHashes, + MetaIndexParser), PackagesFileReadyInPartial(false) { Debug = 
_config->FindB("Debug::pkgAcquire::Diffs",false); RealURI = Target->URI; Desc.Owner = this; - Desc.Description = Target->Description + "/DiffIndex"; + Desc.Description = Target->Description + ".diff/Index"; Desc.ShortDesc = Target->ShortDesc; Desc.URI = Target->URI + ".diff/Index"; - DestFile = _config->FindDir("Dir::State::lists") + "partial/"; - DestFile += URItoFileName(Desc.URI); + DestFile = GetPartialFileNameFromURI(Desc.URI); if(Debug) std::clog << "pkgAcqDiffIndex: " << Desc.URI << std::endl; @@ -392,9 +358,7 @@ pkgAcqDiffIndex::pkgAcqDiffIndex(pkgAcquire *Owner, Desc.URI.substr(0,strlen("file:/")) == "file:/") { // we don't have a pkg file or we don't want to queue - if(Debug) - std::clog << "No index file, local or canceld by user" << std::endl; - Failed("", NULL); + Failed("No index file, local or canceld by user", NULL); return; } @@ -413,7 +377,7 @@ string pkgAcqDiffIndex::Custom600Headers() const { string Final = _config->FindDir("Dir::State::lists"); Final += URItoFileName(Desc.URI); - + if(Debug) std::clog << "Custom600Header-IMS: " << Final << std::endl; @@ -426,173 +390,296 @@ string pkgAcqDiffIndex::Custom600Headers() const /*}}}*/ bool pkgAcqDiffIndex::ParseDiffIndex(string IndexDiffFile) /*{{{*/ { + // failing here is fine: our caller will take care of trying to + // get the complete file if patching fails if(Debug) std::clog << "pkgAcqDiffIndex::ParseIndexDiff() " << IndexDiffFile << std::endl; - pkgTagSection Tags; - string ServerSha1; - vector available_patches; - FileFd Fd(IndexDiffFile,FileFd::ReadOnly); pkgTagFile TF(&Fd); if (_error->PendingError() == true) return false; - if(TF.Step(Tags) == true) + pkgTagSection Tags; + if(unlikely(TF.Step(Tags) == false)) + return false; + + HashStringList ServerHashes; + unsigned long long ServerSize = 0; + + for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type) { - bool found = false; - DiffInfo d; - string size; + std::string tagname = *type; + tagname.append("-Current"); + std::string const tmp = Tags.FindS(tagname.c_str()); + if (tmp.empty() == true) + continue; - string const tmp = Tags.FindS("SHA1-Current"); + string hash; + unsigned long long size; std::stringstream ss(tmp); - ss >> ServerSha1 >> size; - unsigned long const ServerSize = atol(size.c_str()); + ss >> hash >> size; + if (unlikely(hash.empty() == true)) + continue; + if (unlikely(ServerSize != 0 && ServerSize != size)) + continue; + ServerHashes.push_back(HashString(*type, hash)); + ServerSize = size; + } - FileFd fd(CurrentPackagesFile, FileFd::ReadOnly); - SHA1Summation SHA1; - SHA1.AddFD(fd); - string const local_sha1 = SHA1.Result(); + if (ServerHashes.usable() == false) + { + if (Debug == true) + std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": Did not find a good hashsum in the index" << std::endl; + return false; + } - if(local_sha1 == ServerSha1) + if (ServerHashes != HashSums()) + { + if (Debug == true) { - // we have the same sha1 as the server so we are done here - if(Debug) - std::clog << "Package file is up-to-date" << std::endl; - // list cleanup needs to know that this file as well as the already - // present index is ours, so we create an empty diff to save it for us - new pkgAcqIndexDiffs(Owner, TransactionID, Target, - ExpectedHashes, MetaIndexParser, - ServerSha1, available_patches); - return true; + std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": Index has different hashes than parser, probably older, so fail pdiffing" << std::endl; + printHashSumComparision(CurrentPackagesFile, 
ServerHashes, HashSums()); } - else + return false; + } + + if (ServerHashes.VerifyFile(CurrentPackagesFile) == true) + { + // we have the same sha1 as the server so we are done here + if(Debug) + std::clog << "pkgAcqDiffIndex: Package file " << CurrentPackagesFile << " is up-to-date" << std::endl; + + // list cleanup needs to know that this file as well as the already + // present index is ours, so we create an empty diff to save it for us + new pkgAcqIndexDiffs(Owner, TransactionManager, Target, + ExpectedHashes, MetaIndexParser); + return true; + } + + FileFd fd(CurrentPackagesFile, FileFd::ReadOnly); + Hashes LocalHashesCalc; + LocalHashesCalc.AddFD(fd); + HashStringList const LocalHashes = LocalHashesCalc.GetHashStringList(); + + if(Debug) + std::clog << "Server-Current: " << ServerHashes.find(NULL)->toStr() << " and we start at " + << fd.Name() << " " << fd.FileSize() << " " << LocalHashes.find(NULL)->toStr() << std::endl; + + // parse all of (provided) history + vector available_patches; + bool firstAcceptedHashes = true; + for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type) + { + if (LocalHashes.find(*type) == NULL) + continue; + + std::string tagname = *type; + tagname.append("-History"); + std::string const tmp = Tags.FindS(tagname.c_str()); + if (tmp.empty() == true) + continue; + + string hash, filename; + unsigned long long size; + std::stringstream ss(tmp); + + while (ss >> hash >> size >> filename) { - if(Debug) - std::clog << "SHA1-Current: " << ServerSha1 << " and we start at "<< fd.Name() << " " << fd.Size() << " " << local_sha1 << std::endl; + if (unlikely(hash.empty() == true || filename.empty() == true)) + continue; - // check the historie and see what patches we need - string const history = Tags.FindS("SHA1-History"); - std::stringstream hist(history); - while(hist >> d.sha1 >> size >> d.file) + // see if we have a record for this file already + std::vector::iterator cur = available_patches.begin(); + for (; cur != available_patches.end(); ++cur) { - // read until the first match is found - // from that point on, we probably need all diffs - if(d.sha1 == local_sha1) - found=true; - else if (found == false) + if (cur->file != filename || unlikely(cur->result_size != size)) continue; - - if(Debug) - std::clog << "Need to get diff: " << d.file << std::endl; - available_patches.push_back(d); + cur->result_hashes.push_back(HashString(*type, hash)); + break; } - - if (available_patches.empty() == false) + if (cur != available_patches.end()) + continue; + if (firstAcceptedHashes == true) { - // patching with too many files is rather slow compared to a fast download - unsigned long const fileLimit = _config->FindI("Acquire::PDiffs::FileLimit", 0); - if (fileLimit != 0 && fileLimit < available_patches.size()) - { - if (Debug) - std::clog << "Need " << available_patches.size() << " diffs (Limit is " << fileLimit - << ") so fallback to complete download" << std::endl; - return false; - } - - // see if the patches are too big - found = false; // it was true and it will be true again at the end - d = *available_patches.begin(); - string const firstPatch = d.file; - unsigned long patchesSize = 0; - std::stringstream patches(Tags.FindS("SHA1-Patches")); - while(patches >> d.sha1 >> size >> d.file) - { - if (firstPatch == d.file) - found = true; - else if (found == false) - continue; - - patchesSize += atol(size.c_str()); - } - unsigned long const sizeLimit = ServerSize * _config->FindI("Acquire::PDiffs::SizeLimit", 100); - if (sizeLimit > 0 && 
(sizeLimit/100) < patchesSize) - { - if (Debug) - std::clog << "Need " << patchesSize << " bytes (Limit is " << sizeLimit/100 - << ") so fallback to complete download" << std::endl; - return false; - } + DiffInfo next; + next.file = filename; + next.result_hashes.push_back(HashString(*type, hash)); + next.result_size = size; + next.patch_size = 0; + available_patches.push_back(next); + } + else + { + if (Debug == true) + std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": File " << filename + << " wasn't in the list for the first parsed hash! (history)" << std::endl; + break; } } + firstAcceptedHashes = false; + } + + if (unlikely(available_patches.empty() == true)) + { + if (Debug) + std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": " + << "Couldn't find any patches for the patch series." << std::endl; + return false; + } - // we have something, queue the next diff - if(found) + for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type) + { + if (LocalHashes.find(*type) == NULL) + continue; + + std::string tagname = *type; + tagname.append("-Patches"); + std::string const tmp = Tags.FindS(tagname.c_str()); + if (tmp.empty() == true) + continue; + + string hash, filename; + unsigned long long size; + std::stringstream ss(tmp); + + while (ss >> hash >> size >> filename) { - // queue the diffs - string::size_type const last_space = Description.rfind(" "); - if(last_space != string::npos) - Description.erase(last_space, Description.size()-last_space); - - /* decide if we should download patches one by one or in one go: - The first is good if the server merges patches, but many don't so client - based merging can be attempt in which case the second is better. - "bad things" will happen if patches are merged on the server, - but client side merging is attempt as well */ - bool pdiff_merge = _config->FindB("Acquire::PDiffs::Merge", true); - if (pdiff_merge == true) - { - // reprepro adds this flag if it has merged patches on the server - std::string const precedence = Tags.FindS("X-Patch-Precedence"); - pdiff_merge = (precedence != "merged"); - } + if (unlikely(hash.empty() == true || filename.empty() == true)) + continue; - if (pdiff_merge == false) - { - new pkgAcqIndexDiffs(Owner, TransactionID, Target, ExpectedHashes, - MetaIndexParser, - ServerSha1, available_patches); - } - else + // see if we have a record for this file already + std::vector::iterator cur = available_patches.begin(); + for (; cur != available_patches.end(); ++cur) { - std::vector *diffs = new std::vector(available_patches.size()); - for(size_t i = 0; i < available_patches.size(); ++i) - (*diffs)[i] = new pkgAcqIndexMergeDiffs(Owner, - TransactionID, - Target, - ExpectedHashes, - MetaIndexParser, - available_patches[i], - diffs); + if (cur->file != filename) + continue; + if (unlikely(cur->patch_size != 0 && cur->patch_size != size)) + continue; + cur->patch_hashes.push_back(HashString(*type, hash)); + cur->patch_size = size; + break; } - - Complete = false; - Status = StatDone; - Dequeue(); - return true; + if (cur != available_patches.end()) + continue; + if (Debug == true) + std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": File " << filename + << " wasn't in the list for the first parsed hash! 
(patches)" << std::endl; + break; } } + + bool foundStart = false; + for (std::vector::iterator cur = available_patches.begin(); + cur != available_patches.end(); ++cur) + { + if (LocalHashes != cur->result_hashes) + continue; + + available_patches.erase(available_patches.begin(), cur); + foundStart = true; + break; + } + + if (foundStart == false || unlikely(available_patches.empty() == true)) + { + if (Debug) + std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": " + << "Couldn't find the start of the patch series." << std::endl; + return false; + } + + // patching with too many files is rather slow compared to a fast download + unsigned long const fileLimit = _config->FindI("Acquire::PDiffs::FileLimit", 0); + if (fileLimit != 0 && fileLimit < available_patches.size()) + { + if (Debug) + std::clog << "Need " << available_patches.size() << " diffs (Limit is " << fileLimit + << ") so fallback to complete download" << std::endl; + return false; + } + + // calculate the size of all patches we have to get + // note that all sizes are uncompressed, while we download compressed files + unsigned long long patchesSize = 0; + for (std::vector::const_iterator cur = available_patches.begin(); + cur != available_patches.end(); ++cur) + patchesSize += cur->patch_size; + unsigned long long const sizeLimit = ServerSize * _config->FindI("Acquire::PDiffs::SizeLimit", 100); + if (false && sizeLimit > 0 && (sizeLimit/100) < patchesSize) + { + if (Debug) + std::clog << "Need " << patchesSize << " bytes (Limit is " << sizeLimit/100 + << ") so fallback to complete download" << std::endl; + return false; + } + + // FIXME: make this use the method + PackagesFileReadyInPartial = true; + std::string const Partial = GetPartialFileNameFromURI(RealURI); + + FileFd From(CurrentPackagesFile, FileFd::ReadOnly); + FileFd To(Partial, FileFd::WriteEmpty); + if(CopyFile(From, To) == false) + return _error->Errno("CopyFile", "failed to copy"); - // Nothing found, report and return false - // Failing here is ok, if we return false later, the full - // IndexFile is queued if(Debug) - std::clog << "Can't find a patch in the index file" << std::endl; - return false; + std::cerr << "Done copying " << CurrentPackagesFile + << " -> " << Partial + << std::endl; + + // we have something, queue the diffs + string::size_type const last_space = Description.rfind(" "); + if(last_space != string::npos) + Description.erase(last_space, Description.size()-last_space); + + /* decide if we should download patches one by one or in one go: + The first is good if the server merges patches, but many don't so client + based merging can be attempt in which case the second is better. 
+ "bad things" will happen if patches are merged on the server, + but client side merging is attempt as well */ + bool pdiff_merge = _config->FindB("Acquire::PDiffs::Merge", true); + if (pdiff_merge == true) + { + // reprepro adds this flag if it has merged patches on the server + std::string const precedence = Tags.FindS("X-Patch-Precedence"); + pdiff_merge = (precedence != "merged"); + } + + if (pdiff_merge == false) + { + new pkgAcqIndexDiffs(Owner, TransactionManager, Target, ExpectedHashes, + MetaIndexParser, available_patches); + } + else + { + std::vector *diffs = new std::vector(available_patches.size()); + for(size_t i = 0; i < available_patches.size(); ++i) + (*diffs)[i] = new pkgAcqIndexMergeDiffs(Owner, TransactionManager, + Target, + ExpectedHashes, + MetaIndexParser, + available_patches[i], + diffs); + } + + Complete = false; + Status = StatDone; + Dequeue(); + return true; } /*}}}*/ -void pkgAcqDiffIndex::Failed(string Message,pkgAcquire::MethodConfig * /*Cnf*/)/*{{{*/ +void pkgAcqDiffIndex::Failed(string Message,pkgAcquire::MethodConfig * Cnf)/*{{{*/ { if(Debug) std::clog << "pkgAcqDiffIndex failed: " << Desc.URI << " with " << Message << std::endl << "Falling back to normal index file acquire" << std::endl; - new pkgAcqIndex(Owner, TransactionID, Target, ExpectedHashes, MetaIndexParser); + new pkgAcqIndex(Owner, TransactionManager, Target, ExpectedHashes, MetaIndexParser); - Complete = false; + Item::Failed(Message,Cnf); Status = StatDone; - Dequeue(); } /*}}}*/ void pkgAcqDiffIndex::Done(string Message,unsigned long long Size,HashStringList const &Hashes, /*{{{*/ @@ -603,21 +690,33 @@ void pkgAcqDiffIndex::Done(string Message,unsigned long long Size,HashStringList Item::Done(Message, Size, Hashes, Cnf); + // verify the index target + if(Target && Target->MetaKey != "" && MetaIndexParser && Hashes.usable()) + { + std::string IndexMetaKey = Target->MetaKey + ".diff/Index"; + indexRecords::checkSum *Record = MetaIndexParser->Lookup(IndexMetaKey); + if(Record && Record->Hashes.usable() && Hashes != Record->Hashes) + { + RenameOnError(HashSumMismatch); + printHashSumComparision(RealURI, Record->Hashes, Hashes); + Failed(Message, Cnf); + return; + } + + } + string FinalFile; - FinalFile = _config->FindDir("Dir::State::lists")+URItoFileName(RealURI); + FinalFile = _config->FindDir("Dir::State::lists"); + FinalFile += URItoFileName(Desc.URI); - // success in downloading the index - // rename the index - FinalFile += string(".IndexDiff"); - if(Debug) - std::clog << "Renaming: " << DestFile << " -> " << FinalFile - << std::endl; - Rename(DestFile,FinalFile); - chmod(FinalFile.c_str(),0644); - DestFile = FinalFile; + if(StringToBool(LookupTag(Message,"IMS-Hit"),false)) + DestFile = FinalFile; if(!ParseDiffIndex(DestFile)) - return Failed("", NULL); + return Failed("Message: Couldn't parse pdiff index", Cnf); + + // queue for final move + TransactionManager->TransactionStageCopy(this, DestFile, FinalFile); Complete = true; Status = StatDone; @@ -631,18 +730,15 @@ void pkgAcqDiffIndex::Done(string Message,unsigned long long Size,HashStringList * for each diff and the index */ pkgAcqIndexDiffs::pkgAcqIndexDiffs(pkgAcquire *Owner, - unsigned long TransactionID, + pkgAcqMetaBase *TransactionManager, struct IndexTarget const * const Target, HashStringList const &ExpectedHashes, indexRecords *MetaIndexParser, - string ServerSha1, vector diffs) - : pkgAcqBaseIndex(Owner, TransactionID, Target, ExpectedHashes, MetaIndexParser), - available_patches(diffs), ServerSha1(ServerSha1) + : 
pkgAcqBaseIndex(Owner, TransactionManager, Target, ExpectedHashes, MetaIndexParser), + available_patches(diffs) { - - DestFile = _config->FindDir("Dir::State::lists") + "partial/"; - DestFile += URItoFileName(Target->URI); + DestFile = GetPartialFileNameFromURI(Target->URI); Debug = _config->FindB("Debug::pkgAcquire::Diffs",false); @@ -653,7 +749,9 @@ pkgAcqIndexDiffs::pkgAcqIndexDiffs(pkgAcquire *Owner, if(available_patches.empty() == true) { - // we are done (yeah!) + // we are done (yeah!), check hashes against the final file + DestFile = _config->FindDir("Dir::State::lists"); + DestFile += URItoFileName(Target->URI); Finish(true); } else @@ -669,7 +767,7 @@ void pkgAcqIndexDiffs::Failed(string Message,pkgAcquire::MethodConfig * /*Cnf*/) if(Debug) std::clog << "pkgAcqIndexDiffs failed: " << Desc.URI << " with " << Message << std::endl << "Falling back to normal index file acquire" << std::endl; - new pkgAcqIndex(Owner, TransactionID, Target, ExpectedHashes, MetaIndexParser); + new pkgAcqIndex(Owner, TransactionManager, Target, ExpectedHashes, MetaIndexParser); Finish(); } /*}}}*/ @@ -685,13 +783,6 @@ void pkgAcqIndexDiffs::Finish(bool allDone) // the file will be cleaned if(allDone) { - DestFile = _config->FindDir("Dir::State::lists"); - DestFile += URItoFileName(RealURI); - - // FIXME: we want the rred stuff to use the real transactional update - // this is just a workaround - PartialFile = DestFile; - if(HashSums().usable() && !HashSums().VerifyFile(DestFile)) { RenameOnError(HashSumMismatch); @@ -699,6 +790,11 @@ void pkgAcqIndexDiffs::Finish(bool allDone) return; } + // queue for copy + std::string FinalFile = _config->FindDir("Dir::State::lists"); + FinalFile += URItoFileName(RealURI); + TransactionManager->TransactionStageCopy(this, DestFile, FinalFile); + // this is for the "real" finish Complete = true; Status = StatDone; @@ -718,21 +814,32 @@ void pkgAcqIndexDiffs::Finish(bool allDone) /*}}}*/ bool pkgAcqIndexDiffs::QueueNextDiff() /*{{{*/ { - // calc sha1 of the just patched file - string FinalFile = _config->FindDir("Dir::State::lists"); - FinalFile += URItoFileName(RealURI); + std::string const FinalFile = GetPartialFileNameFromURI(RealURI); + + if(!FileExists(FinalFile)) + { + Failed("Message: No FinalFile " + FinalFile + " available", NULL); + return false; + } FileFd fd(FinalFile, FileFd::ReadOnly); - SHA1Summation SHA1; - SHA1.AddFD(fd); - string local_sha1 = string(SHA1.Result()); + Hashes LocalHashesCalc; + LocalHashesCalc.AddFD(fd); + HashStringList const LocalHashes = LocalHashesCalc.GetHashStringList(); + if(Debug) - std::clog << "QueueNextDiff: " - << FinalFile << " (" << local_sha1 << ")"<toStr() << ")" << std::endl; + + if (unlikely(LocalHashes.usable() == false || ExpectedHashes.usable() == false)) + { + Failed("Local/Expected hashes are not usable", NULL); + return false; + } + // final file reached before all patches are applied - if(local_sha1 == ServerSha1) + if(LocalHashes == ExpectedHashes) { Finish(true); return true; @@ -740,10 +847,10 @@ bool pkgAcqIndexDiffs::QueueNextDiff() /*{{{*/ // remove all patches until the next matching patch is found // this requires the Index file to be ordered - for(vector::iterator I=available_patches.begin(); + for(vector::iterator I = available_patches.begin(); available_patches.empty() == false && I != available_patches.end() && - I->sha1 != local_sha1; + I->result_hashes != LocalHashes; ++I) { available_patches.erase(I); @@ -752,19 +859,18 @@ bool pkgAcqIndexDiffs::QueueNextDiff() /*{{{*/ // error checking and falling 
back if no patch was found if(available_patches.empty() == true) { - Failed("", NULL); + Failed("No patches left to reach target", NULL); return false; } // queue the right diff Desc.URI = RealURI + ".diff/" + available_patches[0].file + ".gz"; Desc.Description = Description + " " + available_patches[0].file + string(".pdiff"); - DestFile = _config->FindDir("Dir::State::lists") + "partial/"; - DestFile += URItoFileName(RealURI + ".diff/" + available_patches[0].file); + DestFile = GetPartialFileNameFromURI(RealURI + ".diff/" + available_patches[0].file); if(Debug) std::clog << "pkgAcqIndexDiffs::QueueNextDiff(): " << Desc.URI << std::endl; - + QueueURI(Desc); return true; @@ -778,12 +884,23 @@ void pkgAcqIndexDiffs::Done(string Message,unsigned long long Size, HashStringLi Item::Done(Message, Size, Hashes, Cnf); - string FinalFile; - FinalFile = _config->FindDir("Dir::State::lists")+URItoFileName(RealURI); + // FIXME: verify this download too before feeding it to rred + std::string const FinalFile = GetPartialFileNameFromURI(RealURI); // success in downloading a diff, enter ApplyDiff state if(State == StateFetchDiff) { + FileFd fd(DestFile, FileFd::ReadOnly, FileFd::Gzip); + class Hashes LocalHashesCalc; + LocalHashesCalc.AddFD(fd); + HashStringList const LocalHashes = LocalHashesCalc.GetHashStringList(); + + if (fd.Size() != available_patches[0].patch_size || + available_patches[0].patch_hashes != LocalHashes) + { + Failed("Patch has Size/Hashsum mismatch", NULL); + return; + } // rred excepts the patch as $FinalFile.ed Rename(DestFile,FinalFile+".ed"); @@ -795,7 +912,7 @@ void pkgAcqIndexDiffs::Done(string Message,unsigned long long Size, HashStringLi Local = true; Desc.URI = "rred:" + FinalFile; QueueURI(Desc); - Mode = "rred"; + SetActiveSubprocess("rred"); return; } @@ -818,30 +935,28 @@ void pkgAcqIndexDiffs::Done(string Message,unsigned long long Size, HashStringLi // see if there is more to download if(available_patches.empty() == false) { - new pkgAcqIndexDiffs(Owner, TransactionID, Target, + new pkgAcqIndexDiffs(Owner, TransactionManager, Target, ExpectedHashes, MetaIndexParser, - ServerSha1, available_patches); + available_patches); return Finish(); } else + // update + DestFile = FinalFile; return Finish(true); } } /*}}}*/ // AcqIndexMergeDiffs::AcqIndexMergeDiffs - Constructor /*{{{*/ pkgAcqIndexMergeDiffs::pkgAcqIndexMergeDiffs(pkgAcquire *Owner, - unsigned long TransactionID, + pkgAcqMetaBase *TransactionManager, struct IndexTarget const * const Target, HashStringList const &ExpectedHashes, indexRecords *MetaIndexParser, DiffInfo const &patch, std::vector const * const allPatches) - : pkgAcqBaseIndex(Owner, TransactionID, Target, ExpectedHashes, MetaIndexParser), + : pkgAcqBaseIndex(Owner, TransactionManager, Target, ExpectedHashes, MetaIndexParser), patch(patch), allPatches(allPatches), State(StateFetchDiff) { - - DestFile = _config->FindDir("Dir::State::lists") + "partial/"; - DestFile += URItoFileName(Target->URI); - Debug = _config->FindB("Debug::pkgAcquire::Diffs",false); RealURI = Target->URI; @@ -851,8 +966,8 @@ pkgAcqIndexMergeDiffs::pkgAcqIndexMergeDiffs(pkgAcquire *Owner, Desc.URI = RealURI + ".diff/" + patch.file + ".gz"; Desc.Description = Description + " " + patch.file + string(".pdiff"); - DestFile = _config->FindDir("Dir::State::lists") + "partial/"; - DestFile += URItoFileName(RealURI + ".diff/" + patch.file); + + DestFile = GetPartialFileNameFromURI(RealURI + ".diff/" + patch.file); if(Debug) std::clog << "pkgAcqIndexMergeDiffs: " << Desc.URI << std::endl; @@ 
-860,13 +975,13 @@ pkgAcqIndexMergeDiffs::pkgAcqIndexMergeDiffs(pkgAcquire *Owner, QueueURI(Desc); } /*}}}*/ -void pkgAcqIndexMergeDiffs::Failed(string Message,pkgAcquire::MethodConfig * /*Cnf*/)/*{{{*/ +void pkgAcqIndexMergeDiffs::Failed(string Message,pkgAcquire::MethodConfig * Cnf)/*{{{*/ { if(Debug) std::clog << "pkgAcqIndexMergeDiffs failed: " << Desc.URI << " with " << Message << std::endl; - Complete = false; + + Item::Failed(Message,Cnf); Status = StatDone; - Dequeue(); // check if we are the first to fail, otherwise we are done here State = StateDoneDiff; @@ -878,7 +993,7 @@ void pkgAcqIndexMergeDiffs::Failed(string Message,pkgAcquire::MethodConfig * /*C // first failure means we should fallback State = StateErrorDiff; std::clog << "Falling back to normal index file acquire" << std::endl; - new pkgAcqIndex(Owner, TransactionID, Target, ExpectedHashes, MetaIndexParser); + new pkgAcqIndex(Owner, TransactionManager, Target, ExpectedHashes, MetaIndexParser); } /*}}}*/ void pkgAcqIndexMergeDiffs::Done(string Message,unsigned long long Size,HashStringList const &Hashes, /*{{{*/ @@ -889,10 +1004,22 @@ void pkgAcqIndexMergeDiffs::Done(string Message,unsigned long long Size,HashStri Item::Done(Message,Size,Hashes,Cnf); - string const FinalFile = _config->FindDir("Dir::State::lists") + URItoFileName(RealURI); + // FIXME: verify download before feeding it to rred + string const FinalFile = GetPartialFileNameFromURI(RealURI); if (State == StateFetchDiff) { + FileFd fd(DestFile, FileFd::ReadOnly, FileFd::Gzip); + class Hashes LocalHashesCalc; + LocalHashesCalc.AddFD(fd); + HashStringList const LocalHashes = LocalHashesCalc.GetHashStringList(); + + if (fd.Size() != patch.patch_size || patch.patch_hashes != LocalHashes) + { + Failed("Patch has Size/Hashsum mismatch", NULL); + return; + } + // rred expects the patch as $FinalFile.ed.$patchname.gz Rename(DestFile, FinalFile + ".ed." + patch.file + ".gz"); @@ -916,7 +1043,7 @@ void pkgAcqIndexMergeDiffs::Done(string Message,unsigned long long Size,HashStri Local = true; Desc.URI = "rred:" + FinalFile; QueueURI(Desc); - Mode = "rred"; + SetActiveSubprocess("rred"); return; } // success in download/apply all diffs, clean up @@ -929,24 +1056,25 @@ void pkgAcqIndexMergeDiffs::Done(string Message,unsigned long long Size,HashStri return; } + + std::string FinalFile = _config->FindDir("Dir::State::lists"); + FinalFile += URItoFileName(RealURI); + // move the result into place if(Debug) - std::clog << "Moving patched file in place: " << std::endl + std::clog << "Queue patched file in place: " << std::endl << DestFile << " -> " << FinalFile << std::endl; - Rename(DestFile, FinalFile); - chmod(FinalFile.c_str(), 0644); - // otherwise lists cleanup will eat the file - DestFile = FinalFile; - // FIXME: make the merged rred code really transactional - PartialFile = FinalFile; + // queue for copy by the transaction manager + TransactionManager->TransactionStageCopy(this, DestFile, FinalFile); // ensure the ed's are gone regardless of list-cleanup for (std::vector::const_iterator I = allPatches->begin(); I != allPatches->end(); ++I) { - std::string patch = FinalFile + ".ed." + (*I)->patch.file + ".gz"; - unlink(patch.c_str()); + std::string const PartialFile = GetPartialFileNameFromURI(RealURI); + std::string patch = PartialFile + ".ed." 
+ (*I)->patch.file + ".gz"; + unlink(patch.c_str()); } // all set and done @@ -956,72 +1084,95 @@ void pkgAcqIndexMergeDiffs::Done(string Message,unsigned long long Size,HashStri } } /*}}}*/ +// AcqBaseIndex::VerifyHashByMetaKey - verify hash for the given metakey /*{{{*/ +bool pkgAcqBaseIndex::VerifyHashByMetaKey(HashStringList const &Hashes) +{ + if(MetaKey != "" && Hashes.usable()) + { + indexRecords::checkSum *Record = MetaIndexParser->Lookup(MetaKey); + if(Record && Record->Hashes.usable() && Hashes != Record->Hashes) + { + printHashSumComparision(RealURI, Record->Hashes, Hashes); + return false; + } + } + return true; +} + /*}}}*/ // AcqIndex::AcqIndex - Constructor /*{{{*/ // --------------------------------------------------------------------- -/* The package file is added to the queue and a second class is - instantiated to fetch the revision file */ +/* The package file is added to the queue and a second class is + instantiated to fetch the revision file */ pkgAcqIndex::pkgAcqIndex(pkgAcquire *Owner, string URI,string URIDesc,string ShortDesc, - HashStringList const &ExpectedHash, string comprExt) - : pkgAcqBaseIndex(Owner, 0, NULL, ExpectedHash, NULL), RealURI(URI) + HashStringList const &ExpectedHash) + : pkgAcqBaseIndex(Owner, 0, NULL, ExpectedHash, NULL) { + RealURI = URI; + AutoSelectCompression(); Init(URI, URIDesc, ShortDesc); if(_config->FindB("Debug::Acquire::Transaction", false) == true) - std::clog << "New pkgIndex with TransactionID " - << TransactionID << std::endl; + std::clog << "New pkgIndex with TransactionManager " + << TransactionManager << std::endl; } /*}}}*/ // AcqIndex::AcqIndex - Constructor /*{{{*/ -// --------------------------------------------------------------------- pkgAcqIndex::pkgAcqIndex(pkgAcquire *Owner, - unsigned long TransactionID, + pkgAcqMetaBase *TransactionManager, IndexTarget const *Target, - HashStringList const &ExpectedHash, + HashStringList const &ExpectedHash, indexRecords *MetaIndexParser) - : pkgAcqBaseIndex(Owner, TransactionID, Target, ExpectedHash, - MetaIndexParser), RealURI(Target->URI) + : pkgAcqBaseIndex(Owner, TransactionManager, Target, ExpectedHash, + MetaIndexParser) { + RealURI = Target->URI; + // autoselect the compression method AutoSelectCompression(); Init(Target->URI, Target->Description, Target->ShortDesc); if(_config->FindB("Debug::Acquire::Transaction", false) == true) - std::clog << "New pkgIndex with TransactionID " - << TransactionID << std::endl; + std::clog << "New pkgIndex with TransactionManager " + << TransactionManager << std::endl; } /*}}}*/ // AcqIndex::AutoSelectCompression - Select compression /*{{{*/ -// --------------------------------------------------------------------- void pkgAcqIndex::AutoSelectCompression() { std::vector types = APT::Configuration::getCompressionTypes(); - CompressionExtension = ""; + CompressionExtensions = ""; if (ExpectedHashes.usable()) { - for (std::vector::const_iterator t = types.begin(); t != types.end(); ++t) - if (*t == "uncompressed" || MetaIndexParser->Exists(string(Target->MetaKey).append(".").append(*t)) == true) - CompressionExtension.append(*t).append(" "); + for (std::vector::const_iterator t = types.begin(); + t != types.end(); ++t) + { + std::string CompressedMetaKey = string(Target->MetaKey).append(".").append(*t); + if (*t == "uncompressed" || + MetaIndexParser->Exists(CompressedMetaKey) == true) + CompressionExtensions.append(*t).append(" "); + } } else { for (std::vector::const_iterator t = types.begin(); t != types.end(); ++t) - 
CompressionExtension.append(*t).append(" "); + CompressionExtensions.append(*t).append(" "); } - if (CompressionExtension.empty() == false) - CompressionExtension.erase(CompressionExtension.end()-1); + if (CompressionExtensions.empty() == false) + CompressionExtensions.erase(CompressionExtensions.end()-1); } + /*}}}*/ // AcqIndex::Init - defered Constructor /*{{{*/ -void pkgAcqIndex::Init(string const &URI, string const &URIDesc, string const &ShortDesc) { - Decompression = false; - Erase = false; +void pkgAcqIndex::Init(string const &URI, string const &URIDesc, + string const &ShortDesc) +{ + Stage = STAGE_DOWNLOAD; - DestFile = _config->FindDir("Dir::State::lists") + "partial/"; - DestFile += URItoFileName(URI); + DestFile = GetPartialFileNameFromURI(URI); - std::string const comprExt = CompressionExtension.substr(0, CompressionExtension.find(' ')); - if (comprExt == "uncompressed") + CurrentCompressionExtension = CompressionExtensions.substr(0, CompressionExtensions.find(' ')); + if (CurrentCompressionExtension == "uncompressed") { Desc.URI = URI; if(Target) @@ -1029,9 +1180,10 @@ void pkgAcqIndex::Init(string const &URI, string const &URIDesc, string const &S } else { - Desc.URI = URI + '.' + comprExt; + Desc.URI = URI + '.' + CurrentCompressionExtension; + DestFile = DestFile + '.' + CurrentCompressionExtension; if(Target) - MetaKey = string(Target->MetaKey) + '.' + comprExt; + MetaKey = string(Target->MetaKey) + '.' + CurrentCompressionExtension; } // load the filesize @@ -1040,7 +1192,7 @@ void pkgAcqIndex::Init(string const &URI, string const &URIDesc, string const &S indexRecords::checkSum *Record = MetaIndexParser->Lookup(MetaKey); if(Record) FileSize = Record->Size; - + InitByHashIfNeeded(MetaKey); } @@ -1052,8 +1204,6 @@ void pkgAcqIndex::Init(string const &URI, string const &URIDesc, string const &S } /*}}}*/ // AcqIndex::AdjustForByHash - modify URI for by-hash support /*{{{*/ -// --------------------------------------------------------------------- -/* */ void pkgAcqIndex::InitByHashIfNeeded(const std::string MetaKey) { // TODO: @@ -1088,16 +1238,9 @@ void pkgAcqIndex::InitByHashIfNeeded(const std::string MetaKey) /* The only header we use is the last-modified header. 
*/ string pkgAcqIndex::Custom600Headers() const { - string Final = _config->FindDir("Dir::State::lists"); - Final += URItoFileName(RealURI); - if (_config->FindB("Acquire::GzipIndexes",false)) - Final += ".gz"; - + string Final = GetFinalFilename(); + string msg = "\nIndex-File: true"; - // FIXME: this really should use "IndexTarget::IsOptional()" but that - // seems to be difficult without breaking ABI - if (ShortDesc().find("Translation") != 0) - msg += "\nFail-Ignore: true"; struct stat Buf; if (stat(Final.c_str(),&Buf) == 0) msg += "\nLast-Modified: " + TimeRFC1123(Buf.st_mtime); @@ -1105,61 +1248,89 @@ string pkgAcqIndex::Custom600Headers() const return msg; } /*}}}*/ -// pkgAcqIndex::Failed - getting the indexfile failed /*{{{*/ -// --------------------------------------------------------------------- -/* */ -void pkgAcqIndex::Failed(string Message,pkgAcquire::MethodConfig *Cnf) /*{{{*/ +// pkgAcqIndex::Failed - getting the indexfile failed /*{{{*/ +void pkgAcqIndex::Failed(string Message,pkgAcquire::MethodConfig *Cnf) { - size_t const nextExt = CompressionExtension.find(' '); + size_t const nextExt = CompressionExtensions.find(' '); if (nextExt != std::string::npos) { - CompressionExtension = CompressionExtension.substr(nextExt+1); + CompressionExtensions = CompressionExtensions.substr(nextExt+1); Init(RealURI, Desc.Description, Desc.ShortDesc); return; } // on decompression failure, remove bad versions in partial/ - if (Decompression && Erase) { - string s = _config->FindDir("Dir::State::lists") + "partial/"; - s.append(URItoFileName(RealURI)); - unlink(s.c_str()); + if (Stage == STAGE_DECOMPRESS_AND_VERIFY) + { + unlink(EraseFileName.c_str()); } Item::Failed(Message,Cnf); /// cancel the entire transaction - Owner->AbortTransaction(TransactionID); + TransactionManager->AbortTransaction(); } /*}}}*/ -// pkgAcqIndex::GetFinalFilename - Return the full final file path /*{{{*/ -// --------------------------------------------------------------------- -/* */ -std::string pkgAcqIndex::GetFinalFilename(std::string const &URI, - std::string const &compExt) +// pkgAcqIndex::GetFinalFilename - Return the full final file path /*{{{*/ +std::string pkgAcqIndex::GetFinalFilename() const { std::string FinalFile = _config->FindDir("Dir::State::lists"); - FinalFile += URItoFileName(URI); - if (_config->FindB("Acquire::GzipIndexes",false) && compExt == "gz") - FinalFile += ".gz"; + FinalFile += URItoFileName(RealURI); + if (_config->FindB("Acquire::GzipIndexes",false) == true) + FinalFile += '.' 
+ CurrentCompressionExtension; return FinalFile; } - /*}}}*/ -// AcqIndex::ReverifyAfterIMS - Reverify index after an ims-hit /*{{{*/ -// --------------------------------------------------------------------- -/* */ -void pkgAcqIndex::ReverifyAfterIMS(std::string const &FileName) + /*}}}*/ +// AcqIndex::ReverifyAfterIMS - Reverify index after an ims-hit /*{{{*/ +void pkgAcqIndex::ReverifyAfterIMS() { - std::string const compExt = CompressionExtension.substr(0, CompressionExtension.find(' ')); - if (_config->FindB("Acquire::GzipIndexes",false) && compExt == "gz") - DestFile += ".gz"; + // update destfile to *not* include the compression extension when doing + // a reverify (as its uncompressed on disk already) + DestFile = GetPartialFileNameFromURI(RealURI); + + // do not reverify cdrom sources as apt-cdrom may rewrite the Packages + // file when its doing the indexcopy + if (RealURI.substr(0,6) == "cdrom:") + return; + + // adjust DestFile if its compressed on disk + if (_config->FindB("Acquire::GzipIndexes",false) == true) + DestFile += '.' + CurrentCompressionExtension; // copy FinalFile into partial/ so that we check the hash again - string FinalFile = GetFinalFilename(RealURI, compExt); - Decompression = true; + string FinalFile = GetFinalFilename(); + Stage = STAGE_DECOMPRESS_AND_VERIFY; Desc.URI = "copy:" + FinalFile; QueueURI(Desc); } - /*}}}*/ + /*}}}*/ +// AcqIndex::ValidateFile - Validate the content of the downloaded file /*{{{*/ +bool pkgAcqIndex::ValidateFile(const std::string &FileName) +{ + // FIXME: this can go away once we only ever download stuff that + // has a valid hash and we never do GET based probing + // FIXME2: this also leaks debian-isms into the code and should go therefore + + /* Always validate the index file for correctness (all indexes must + * have a Package field) (LP: #346386) (Closes: #627642) + */ + FileFd fd(FileName, FileFd::ReadOnly, FileFd::Extension); + // Only test for correctness if the content of the file is not empty + // (empty is ok) + if (fd.Size() > 0) + { + pkgTagSection sec; + pkgTagFile tag(&fd); + + // all our current indexes have a field 'Package' in each section + if (_error->PendingError() == true || + tag.Step(sec) == false || + sec.Exists("Package") == false) + return false; + } + return true; +} + /*}}}*/ // AcqIndex::Done - Finished a fetch /*{{{*/ // --------------------------------------------------------------------- /* This goes through a number of states.. On the initial fetch the @@ -1167,92 +1338,50 @@ void pkgAcqIndex::ReverifyAfterIMS(std::string const &FileName) to the uncompressed version of the file. If this is so the file is copied into the partial directory. In all other cases the file is decompressed with a compressed uri. 
*/ -void pkgAcqIndex::Done(string Message,unsigned long long Size,HashStringList const &Hashes, +void pkgAcqIndex::Done(string Message, + unsigned long long Size, + HashStringList const &Hashes, pkgAcquire::MethodConfig *Cfg) { Item::Done(Message,Size,Hashes,Cfg); - std::string const compExt = CompressionExtension.substr(0, CompressionExtension.find(' ')); - if (Decompression == true) + switch(Stage) { - if (ExpectedHashes.usable() && ExpectedHashes != Hashes) - { - RenameOnError(HashSumMismatch); - printHashSumComparision(RealURI, ExpectedHashes, Hashes); - Failed(Message, Cfg); - return; - } - - // FIXME: this can go away once we only ever download stuff that - // has a valid hash and we never do GET based probing - // - /* Always verify the index file for correctness (all indexes must - * have a Package field) (LP: #346386) (Closes: #627642) - */ - FileFd fd(DestFile, FileFd::ReadOnly, FileFd::Extension); - // Only test for correctness if the content of the file is not empty - // (empty is ok) - if (fd.Size() > 0) - { - pkgTagSection sec; - pkgTagFile tag(&fd); - - // all our current indexes have a field 'Package' in each section - if (_error->PendingError() == true || tag.Step(sec) == false || sec.Exists("Package") == false) - { - RenameOnError(InvalidFormat); - Failed(Message, Cfg); - return; - } - } - - // FIXME: can we void the "Erase" bool here as its very non-local? - std::string CompressedFile = _config->FindDir("Dir::State::lists") + "partial/"; - CompressedFile += URItoFileName(RealURI); - // Remove the compressed version. - if (Erase == true) - unlink(CompressedFile.c_str()); - - // Done, queue for rename on transaction finished - PartialFile = DestFile; - DestFile = GetFinalFilename(RealURI, compExt); - - return; + case STAGE_DOWNLOAD: + StageDownloadDone(Message, Hashes, Cfg); + break; + case STAGE_DECOMPRESS_AND_VERIFY: + StageDecompressDone(Message, Hashes, Cfg); + break; } - - // FIXME: use the same method to find - // check the compressed hash too - if(MetaKey != "" && Hashes.size() > 0) +} + /*}}}*/ +// AcqIndex::StageDownloadDone - Queue for decompress and verify /*{{{*/ +void pkgAcqIndex::StageDownloadDone(string Message, + HashStringList const &Hashes, + pkgAcquire::MethodConfig *Cfg) +{ + // First check if the calculcated Hash of the (compressed) downloaded + // file matches the hash we have in the MetaIndexRecords for this file + if(VerifyHashByMetaKey(Hashes) == false) { - indexRecords::checkSum *Record = MetaIndexParser->Lookup(MetaKey); - if(Record && Record->Hashes.usable() && Hashes != Record->Hashes) - { - RenameOnError(HashSumMismatch); - printHashSumComparision(RealURI, Record->Hashes, Hashes); - Failed(Message, Cfg); - return; - } + RenameOnError(HashSumMismatch); + Failed(Message, Cfg); + return; } - Erase = false; Complete = true; - + // Handle the unzipd case string FileName = LookupTag(Message,"Alt-Filename"); if (FileName.empty() == false) { - // The files timestamp matches - if (StringToBool(LookupTag(Message,"Alt-IMS-Hit"),false) == true) - { - ReverifyAfterIMS(FileName); - return; - } - Decompression = true; + Stage = STAGE_DECOMPRESS_AND_VERIFY; Local = true; DestFile += ".decomp"; Desc.URI = "copy:" + FileName; QueueURI(Desc); - Mode = "copy"; + SetActiveSubprocess("copy"); return; } @@ -1263,75 +1392,106 @@ void pkgAcqIndex::Done(string Message,unsigned long long Size,HashStringList con ErrorText = "Method gave a blank filename"; } - // The files timestamp matches - if (StringToBool(LookupTag(Message,"IMS-Hit"),false) == true) + // Methods like 
e.g. "file:" will give us a (compressed) FileName that is + // not the "DestFile" we set, in this case we uncompress from the local file + if (FileName != DestFile) + Local = true; + else + EraseFileName = FileName; + + // we need to verify the file against the current Release file again + // on if-modfied-since hit to avoid a stale attack against us + if(StringToBool(LookupTag(Message,"IMS-Hit"),false) == true) { - ReverifyAfterIMS(FileName); + // The files timestamp matches, reverify by copy into partial/ + EraseFileName = ""; + ReverifyAfterIMS(); return; - } - - if (FileName == DestFile) - Erase = true; - else - Local = true; - - string decompProg; + } - // If we enable compressed indexes and already have gzip, keep it - if (_config->FindB("Acquire::GzipIndexes",false) && compExt == "gz" && !Local) { - // Done, queue for rename on transaction finished - PartialFile = DestFile; - DestFile = GetFinalFilename(RealURI, compExt); + // If we have compressed indexes enabled, queue for hash verification + if (_config->FindB("Acquire::GzipIndexes",false)) + { + DestFile = GetPartialFileNameFromURI(RealURI + '.' + CurrentCompressionExtension); + EraseFileName = ""; + Stage = STAGE_DECOMPRESS_AND_VERIFY; + Desc.URI = "copy:" + FileName; + QueueURI(Desc); + SetActiveSubprocess("copy"); return; } // get the binary name for your used compression type - decompProg = _config->Find(string("Acquire::CompressionTypes::").append(compExt),""); - if(decompProg.empty() == false); - else if(compExt == "uncompressed") + string decompProg; + if(CurrentCompressionExtension == "uncompressed") decompProg = "copy"; - else { - _error->Error("Unsupported extension: %s", compExt.c_str()); + else + decompProg = _config->Find(string("Acquire::CompressionTypes::").append(CurrentCompressionExtension),""); + if(decompProg.empty() == true) + { + _error->Error("Unsupported extension: %s", CurrentCompressionExtension.c_str()); return; } - Decompression = true; + // queue uri for the next stage + Stage = STAGE_DECOMPRESS_AND_VERIFY; DestFile += ".decomp"; Desc.URI = decompProg + ":" + FileName; QueueURI(Desc); + SetActiveSubprocess(decompProg); +} + /*}}}*/ +// pkgAcqIndex::StageDecompressDone - Final verification /*{{{*/ +void pkgAcqIndex::StageDecompressDone(string Message, + HashStringList const &Hashes, + pkgAcquire::MethodConfig *Cfg) +{ + if (ExpectedHashes.usable() && ExpectedHashes != Hashes) + { + Desc.URI = RealURI; + RenameOnError(HashSumMismatch); + printHashSumComparision(RealURI, ExpectedHashes, Hashes); + Failed(Message, Cfg); + return; + } + + if(!ValidateFile(DestFile)) + { + RenameOnError(InvalidFormat); + Failed(Message, Cfg); + return; + } + + // remove the compressed version of the file + unlink(EraseFileName.c_str()); - // FIXME: this points to a c++ string that goes out of scope - Mode = decompProg.c_str(); + // Done, queue for rename on transaction finished + TransactionManager->TransactionStageCopy(this, DestFile, GetFinalFilename()); + + return; } /*}}}*/ // AcqIndexTrans::pkgAcqIndexTrans - Constructor /*{{{*/ // --------------------------------------------------------------------- /* The Translation file is added to the queue */ pkgAcqIndexTrans::pkgAcqIndexTrans(pkgAcquire *Owner, - string URI,string URIDesc,string ShortDesc) - : pkgAcqIndex(Owner, URI, URIDesc, ShortDesc, HashStringList(), "") + string URI,string URIDesc,string ShortDesc) + : pkgAcqIndex(Owner, URI, URIDesc, ShortDesc, HashStringList()) { } - /*}}}*/ -pkgAcqIndexTrans::pkgAcqIndexTrans(pkgAcquire *Owner, - unsigned long 
TransactionID, +pkgAcqIndexTrans::pkgAcqIndexTrans(pkgAcquire *Owner, + pkgAcqMetaBase *TransactionManager, IndexTarget const * const Target, - HashStringList const &ExpectedHashes, + HashStringList const &ExpectedHashes, indexRecords *MetaIndexParser) - : pkgAcqIndex(Owner, TransactionID, Target, ExpectedHashes, MetaIndexParser) + : pkgAcqIndex(Owner, TransactionManager, Target, ExpectedHashes, MetaIndexParser) { - // load the filesize - indexRecords::checkSum *Record = MetaIndexParser->Lookup(string(Target->MetaKey)); - if(Record) - FileSize = Record->Size; } /*}}}*/ // AcqIndexTrans::Custom600Headers - Insert custom request headers /*{{{*/ -// --------------------------------------------------------------------- string pkgAcqIndexTrans::Custom600Headers() const { - string Final = _config->FindDir("Dir::State::lists"); - Final += URItoFileName(RealURI); + string Final = GetFinalFilename(); struct stat Buf; if (stat(Final.c_str(),&Buf) != 0) @@ -1340,46 +1500,132 @@ string pkgAcqIndexTrans::Custom600Headers() const } /*}}}*/ // AcqIndexTrans::Failed - Silence failure messages for missing files /*{{{*/ -// --------------------------------------------------------------------- -/* */ void pkgAcqIndexTrans::Failed(string Message,pkgAcquire::MethodConfig *Cnf) { - size_t const nextExt = CompressionExtension.find(' '); + size_t const nextExt = CompressionExtensions.find(' '); if (nextExt != std::string::npos) { - CompressionExtension = CompressionExtension.substr(nextExt+1); + CompressionExtensions = CompressionExtensions.substr(nextExt+1); Init(RealURI, Desc.Description, Desc.ShortDesc); Status = StatIdle; return; } + Item::Failed(Message,Cnf); + // FIXME: this is used often (e.g. in pkgAcqIndexTrans) so refactor - if (Cnf->LocalOnly == true || + if (Cnf->LocalOnly == true || StringToBool(LookupTag(Message,"Transient-Failure"),false) == false) - { + { // Ignore this Status = StatDone; - Complete = false; - Dequeue(); - return; } +} + /*}}}*/ +// AcqMetaBase::Add - Add a item to the current Transaction /*{{{*/ +void pkgAcqMetaBase::Add(Item *I) +{ + Transaction.push_back(I); +} + /*}}}*/ +// AcqMetaBase::AbortTransaction - Abort the current Transaction /*{{{*/ +void pkgAcqMetaBase::AbortTransaction() +{ + if(_config->FindB("Debug::Acquire::Transaction", false) == true) + std::clog << "AbortTransaction: " << TransactionManager << std::endl; - Item::Failed(Message,Cnf); + // ensure the toplevel is in error state too + for (std::vector::iterator I = Transaction.begin(); + I != Transaction.end(); ++I) + { + if(_config->FindB("Debug::Acquire::Transaction", false) == true) + std::clog << " Cancel: " << (*I)->DestFile << std::endl; + // the transaction will abort, so stop anything that is idle + if ((*I)->Status == pkgAcquire::Item::StatIdle) + (*I)->Status = pkgAcquire::Item::StatDone; + + // kill failed files in partial + if ((*I)->Status == pkgAcquire::Item::StatError) + { + std::string const PartialFile = GetPartialFileName(flNotDir((*I)->DestFile)); + if(FileExists(PartialFile)) + Rename(PartialFile, PartialFile + ".FAILED"); + } + } } /*}}}*/ +// AcqMetaBase::TransactionHasError - Check for errors in Transaction /*{{{*/ +bool pkgAcqMetaBase::TransactionHasError() +{ + for (pkgAcquire::ItemIterator I = Transaction.begin(); + I != Transaction.end(); ++I) + if((*I)->Status != pkgAcquire::Item::StatDone && + (*I)->Status != pkgAcquire::Item::StatIdle) + return true; -pkgAcqMetaSigBase::pkgAcqMetaSigBase(pkgAcquire *Owner, - HashStringList const &ExpectedHashes, - unsigned long TransactionID) - : 
Item(Owner, ExpectedHashes, TransactionID) + return false; +} + /*}}}*/ +// AcqMetaBase::CommitTransaction - Commit a transaction /*{{{*/ +void pkgAcqMetaBase::CommitTransaction() { + if(_config->FindB("Debug::Acquire::Transaction", false) == true) + std::clog << "CommitTransaction: " << this << std::endl; + + // move new files into place *and* remove files that are not + // part of the transaction but are still on disk + for (std::vector<Item*>::iterator I = Transaction.begin(); + I != Transaction.end(); ++I) + { + if((*I)->PartialFile != "") + { + if(_config->FindB("Debug::Acquire::Transaction", false) == true) + std::clog << "mv " << (*I)->PartialFile << " -> "<< (*I)->DestFile << " " + << (*I)->DescURI() << std::endl; + + Rename((*I)->PartialFile, (*I)->DestFile); + ChangeOwnerAndPermissionOfFile("CommitTransaction", (*I)->DestFile.c_str(), "root", "root", 0644); + + } else { + if(_config->FindB("Debug::Acquire::Transaction", false) == true) + std::clog << "rm " + << (*I)->DestFile + << " " + << (*I)->DescURI() + << std::endl; + unlink((*I)->DestFile.c_str()); + } + // mark that this transaction is finished + (*I)->TransactionManager = 0; + } +} + /*}}}*/ +// AcqMetaBase::TransactionStageCopy - Stage a file for copying /*{{{*/ +void pkgAcqMetaBase::TransactionStageCopy(Item *I, + const std::string &From, + const std::string &To) +{ + I->PartialFile = From; + I->DestFile = To; +} + /*}}}*/ +// AcqMetaBase::TransactionStageRemoval - Stage a file for removal /*{{{*/ +void pkgAcqMetaBase::TransactionStageRemoval(Item *I, + const std::string &FinalFile) +{ + I->PartialFile = ""; + I->DestFile = FinalFile; } - /*{{{*/ -bool pkgAcqMetaSigBase::GenerateAuthWarning(const std::string &RealURI, - const std::string &Message) + /*}}}*/ +// AcqMetaBase::CheckStopAuthentication - Check gpg authentication error /*{{{*/ +bool pkgAcqMetaBase::CheckStopAuthentication(const std::string &RealURI, + const std::string &Message) { + // FIXME: this entire function can go now that we disallow going to + // an unauthenticated state and can cleanly rollback + string Final = _config->FindDir("Dir::State::lists") + URItoFileName(RealURI); - + if(FileExists(Final)) { Status = StatTransientNetworkError; @@ -1403,35 +1649,35 @@ bool pkgAcqMetaSigBase::GenerateAuthWarning(const std::string &RealURI, Desc.Description.c_str(), LookupTag(Message,"Message").c_str()); } - // gpgv method failed + // gpgv method failed ReportMirrorFailure("GPGFailure"); return false; } /*}}}*/ - - -pkgAcqMetaSig::pkgAcqMetaSig(pkgAcquire *Owner, /*{{{*/ - unsigned long TransactionID, +// AcqMetaSig::AcqMetaSig - Constructor /*{{{*/ +pkgAcqMetaSig::pkgAcqMetaSig(pkgAcquire *Owner, + pkgAcqMetaBase *TransactionManager, string URI,string URIDesc,string ShortDesc, string MetaIndexFile, const vector* IndexTargets, indexRecords* MetaIndexParser) : - pkgAcqMetaSigBase(Owner, HashStringList(), TransactionID), RealURI(URI), - MetaIndexParser(MetaIndexParser), MetaIndexFile(MetaIndexFile), - IndexTargets(IndexTargets), AuthPass(false), IMSHit(false) + pkgAcqMetaBase(Owner, IndexTargets, MetaIndexParser, + HashStringList(), TransactionManager), + RealURI(URI), MetaIndexFile(MetaIndexFile), URIDesc(URIDesc), + ShortDesc(ShortDesc) { DestFile = _config->FindDir("Dir::State::lists") + "partial/"; - DestFile += URItoFileName(URI); + DestFile += URItoFileName(RealURI); - // remove any partial downloaded sig-file in partial/. - // it may confuse proxies and is too small to warrant a + // remove any partial downloaded sig-file in partial/.
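The staging API above encodes the fate of every item in two strings: TransactionStageCopy leaves PartialFile pointing at the downloaded data and DestFile at its final home, while TransactionStageRemoval clears PartialFile so the commit loop deletes DestFile instead of renaming into it. A compact sketch of the two commit paths, with simplified types, fixed 0644 permissions and without the ownership handling or debug output of the real loop:

#include <cstdio>      // std::rename, std::remove
#include <string>
#include <sys/stat.h>  // chmod
#include <vector>

struct StagedItem {            // stand-in for the per-item transaction state
   std::string PartialFile;    // source in partial/; empty means "remove DestFile"
   std::string DestFile;       // final location below lists/
};

void CommitTransaction(std::vector<StagedItem> const &Transaction)
{
   for (std::vector<StagedItem>::const_iterator I = Transaction.begin(); I != Transaction.end(); ++I)
   {
      if (!I->PartialFile.empty())
      {
         // staged copy: move into place and make the result world readable again
         std::rename(I->PartialFile.c_str(), I->DestFile.c_str());
         chmod(I->DestFile.c_str(), 0644);
      }
      else
      {
         // staged removal: the file is no longer part of the repository state
         std::remove(I->DestFile.c_str());
      }
   }
}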
+ // it may confuse proxies and is too small to warrant a // partial download anyway unlink(DestFile.c_str()); - // set the TransactionID + // set the TransactionManager if(_config->FindB("Debug::Acquire::Transaction", false) == true) - std::clog << "New pkgAcqMetaSig with TransactionID " - << TransactionID << std::endl; + std::clog << "New pkgAcqMetaSig with TransactionManager " + << TransactionManager << std::endl; // Create the item Desc.Description = URIDesc; @@ -1448,135 +1694,142 @@ pkgAcqMetaSig::~pkgAcqMetaSig() /*{{{*/ /*}}}*/ // pkgAcqMetaSig::Custom600Headers - Insert custom request headers /*{{{*/ // --------------------------------------------------------------------- -/* The only header we use is the last-modified header. */ string pkgAcqMetaSig::Custom600Headers() const { - string FinalFile = _config->FindDir("Dir::State::lists"); - FinalFile += URItoFileName(RealURI); - - struct stat Buf; - if (stat(FinalFile.c_str(),&Buf) != 0) - return "\nIndex-File: true"; - - return "\nIndex-File: true\nLast-Modified: " + TimeRFC1123(Buf.st_mtime); + std::string Header = GetCustom600Headers(RealURI); + return Header; } - -void pkgAcqMetaSig::Done(string Message,unsigned long long Size, HashStringList const &Hashes, + /*}}}*/ +// pkgAcqMetaSig::Done - The signature was downloaded/verified /*{{{*/ +// --------------------------------------------------------------------- +/* The only header we use is the last-modified header. */ +void pkgAcqMetaSig::Done(string Message,unsigned long long Size, + HashStringList const &Hashes, pkgAcquire::MethodConfig *Cfg) { Item::Done(Message, Size, Hashes, Cfg); - string FileName = LookupTag(Message,"Filename"); - if (FileName.empty() == true) - { - Status = StatError; - ErrorText = "Method gave a blank filename"; - return; - } - - if (FileName != DestFile) - { - // We have to copy it into place - Local = true; - Desc.URI = "copy:" + FileName; - QueueURI(Desc); - return; - } - - if(StringToBool(LookupTag(Message,"IMS-Hit"),false) == true) - IMSHit = true; - - // adjust paths if its a ims-hit - if(IMSHit) - { - string FinalFile = _config->FindDir("Dir::State::lists"); - FinalFile += URItoFileName(RealURI); - - DestFile = PartialFile = FinalFile; - } - - // queue for verify if(AuthPass == false) { - AuthPass = true; - Desc.URI = "gpgv:" + DestFile; - DestFile = MetaIndexFile; - QueueURI(Desc); + if(CheckDownloadDone(Message, RealURI) == true) + { + // destfile will be modified to point to MetaIndexFile for the + // gpgv method, so we need to save it here + MetaIndexFileSignature = DestFile; + QueueForSignatureVerify(MetaIndexFile, MetaIndexFileSignature); + } return; } - - // queue to copy the file in place if it was not a ims hit, on ims - // hit the file is already at the right place - if(IMSHit == false) + else { - PartialFile = _config->FindDir("Dir::State::lists") + "partial/"; - PartialFile += URItoFileName(RealURI); - - DestFile = _config->FindDir("Dir::State::lists"); - DestFile += URItoFileName(RealURI); + if(CheckAuthDone(Message, RealURI) == true) + { + std::string FinalFile = _config->FindDir("Dir::State::lists"); + FinalFile += URItoFileName(RealURI); + TransactionManager->TransactionStageCopy(this, MetaIndexFileSignature, FinalFile); + } } - - Complete = true; - } /*}}}*/ void pkgAcqMetaSig::Failed(string Message,pkgAcquire::MethodConfig *Cnf)/*{{{*/ { string Final = _config->FindDir("Dir::State::lists") + URItoFileName(RealURI); + // check if we need to fail at this point + if (AuthPass == true && CheckStopAuthentication(RealURI, Message)) + 
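pkgAcqMetaSig::Done above runs twice per signature: on the first pass the downloaded Release.gpg and the already fetched Release file are handed to the gpgv method, on the second pass the verified signature is staged for its final place in lists/. A stripped-down model of that two-pass control flow; SigState and SignatureDone are invented names, and the real code routes these steps through CheckDownloadDone, QueueForSignatureVerify and CheckAuthDone:

#include <string>

struct SigState {
   bool AuthPass;                     // false: download pass, true: gpgv pass
   std::string MetaIndexFile;         // the Release file the signature covers
   std::string MetaIndexFileSignature;
   std::string DestFile;              // where the last method left its result
};

// One Done() invocation; the return value only describes what would be queued next.
std::string SignatureDone(SigState &S, bool StepSucceeded)
{
   if (S.AuthPass == false)
   {
      if (StepSucceeded == false)
         return "download failed";
      S.MetaIndexFileSignature = S.DestFile;   // remember where the .gpg landed
      S.AuthPass = true;                       // the next call will come from gpgv
      return "run gpgv on " + S.MetaIndexFileSignature + " for " + S.MetaIndexFile;
   }
   if (StepSucceeded == false)
      return "signature rejected";
   return "stage " + S.MetaIndexFileSignature + " for lists/";
}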
return; + + // FIXME: meh, this is not really elegant + string InReleaseURI = RealURI.replace(RealURI.rfind("Release.gpg"), 12, + "InRelease"); + string FinalInRelease = _config->FindDir("Dir::State::lists") + URItoFileName(InReleaseURI); + + if (RealFileExists(Final) || RealFileExists(FinalInRelease)) + { + std::string downgrade_msg; + strprintf(downgrade_msg, _("The repository '%s' is no longer signed."), + URIDesc.c_str()); + if(_config->FindB("Acquire::AllowDowngradeToInsecureRepositories")) + { + // meh, the user wants to take risks (we still mark the packages + // from this repository as unauthenticated) + _error->Warning("%s", downgrade_msg.c_str()); + _error->Warning(_("This is normally not allowed, but the option " + "Acquire::AllowDowngradeToInsecureRepositories was " + "given to override it.")); + + } else { + _error->Error("%s", downgrade_msg.c_str()); + Rename(MetaIndexFile, MetaIndexFile+".FAILED"); + Item::Failed("Message: " + downgrade_msg, Cnf); + TransactionManager->AbortTransaction(); + return; + } + } + else + _error->Warning(_("The data from '%s' is not signed. Packages " + "from that repository cannot be authenticated."), + URIDesc.c_str()); + // this ensures that any file in the lists/ dir is removed by the // transaction - DestFile = _config->FindDir("Dir::State::lists") + "partial/"; - DestFile += URItoFileName(RealURI); - PartialFile = ""; + DestFile = GetPartialFileNameFromURI(RealURI); + TransactionManager->TransactionStageRemoval(this, DestFile); - // FIXME: duplicated code from pkgAcqMetaIndex - if (AuthPass == true) + // only allow going further if the user explicitly wants it + if(MetaIndexParser->IsAlwaysTrusted() || _config->FindB("Acquire::AllowInsecureRepositories") == true) { - bool Stop = GenerateAuthWarning(RealURI, Message); - if(Stop) - return; + // we parse the indexes here because at this point the user wanted + // a repository that may potentially harm him + MetaIndexParser->Load(MetaIndexFile); + QueueIndexes(true); + } + else + { + _error->Error("Use --allow-insecure-repositories to force the update"); } + Item::Failed(Message,Cnf); + // FIXME: this is used often (e.g.
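The Failed handler above separates three cases: a repository that used to be signed (an old Release or InRelease is still in lists/) is refused unless Acquire::AllowDowngradeToInsecureRepositories is set, a never-signed repository only triggers a warning, and in the surviving cases indexes are queued only for always-trusted sources or when Acquire::AllowInsecureRepositories is enabled. The decision collapses to a small helper; the bool parameters here replace the _config lookups and file checks of the real code:

#include <stdexcept>

// Returns true when unauthenticated indexes may still be queued.
bool MayContinueWithoutSignature(bool WasSignedBefore,
                                 bool AllowDowngradeToInsecure,
                                 bool AlwaysTrusted,
                                 bool AllowInsecure)
{
   // losing an existing signature is treated as an attack unless overridden
   if (WasSignedBefore && !AllowDowngradeToInsecure)
      throw std::runtime_error("repository is no longer signed");
   // never-signed (or explicitly downgraded) repositories get a warning only,
   // but their indexes are fetched solely when the user opted into insecure updates
   return AlwaysTrusted || AllowInsecure;
}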
in pkgAcqIndexTrans) so refactor - if (Cnf->LocalOnly == true || + if (Cnf->LocalOnly == true || StringToBool(LookupTag(Message,"Transient-Failure"),false) == false) - { + { // Ignore this Status = StatDone; - Complete = false; - Dequeue(); - return; } - Item::Failed(Message,Cnf); } /*}}}*/ pkgAcqMetaIndex::pkgAcqMetaIndex(pkgAcquire *Owner, /*{{{*/ - unsigned long TransactionID, + pkgAcqMetaBase *TransactionManager, string URI,string URIDesc,string ShortDesc, string MetaIndexSigURI,string MetaIndexSigURIDesc, string MetaIndexSigShortDesc, const vector* IndexTargets, indexRecords* MetaIndexParser) : - pkgAcqMetaSigBase(Owner, HashStringList(), TransactionID), RealURI(URI), IndexTargets(IndexTargets), - MetaIndexParser(MetaIndexParser), AuthPass(false), IMSHit(false), + pkgAcqMetaBase(Owner, IndexTargets, MetaIndexParser, HashStringList(), + TransactionManager), + RealURI(URI), URIDesc(URIDesc), ShortDesc(ShortDesc), MetaIndexSigURI(MetaIndexSigURI), MetaIndexSigURIDesc(MetaIndexSigURIDesc), MetaIndexSigShortDesc(MetaIndexSigShortDesc) { - if(TransactionID == 0) - this->TransactionID = (unsigned long)this; + if(TransactionManager == NULL) + { + this->TransactionManager = this; + this->TransactionManager->Add(this); + } if(_config->FindB("Debug::Acquire::Transaction", false) == true) - std::clog << "New pkgAcqMetaIndex with TransactionID " - << TransactionID << std::endl; + std::clog << "New pkgAcqMetaIndex with TransactionManager " + << this->TransactionManager << std::endl; + Init(URIDesc, ShortDesc); } /*}}}*/ -// pkgAcqMetaIndex::Init - Delayed constructor /*{{{*/ +// pkgAcqMetaIndex::Init - Delayed constructor /*{{{*/ void pkgAcqMetaIndex::Init(std::string URIDesc, std::string ShortDesc) { - DestFile = _config->FindDir("Dir::State::lists") + "partial/"; - DestFile += URItoFileName(RealURI); + DestFile = GetPartialFileNameFromURI(RealURI); // Create the item Desc.Description = URIDesc; @@ -1588,78 +1841,106 @@ void pkgAcqMetaIndex::Init(std::string URIDesc, std::string ShortDesc) ExpectedAdditionalItems = IndexTargets->size(); QueueURI(Desc); } + /*}}}*/ // pkgAcqMetaIndex::Custom600Headers - Insert custom request headers /*{{{*/ // --------------------------------------------------------------------- -/* The only header we use is the last-modified header. 
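pkgAcqMetaIndex above promotes itself to TransactionManager when none is supplied, so the top-level Release item and every item it spawns later share a single transaction. The ownership pattern on its own looks roughly like this (the Mini* names are invented; the real classes carry far more state):

#include <vector>

struct MiniItem;                        // stand-in for pkgAcquire::Item

struct MiniTransaction {                // stand-in for the transaction-managing base
   std::vector<MiniItem *> Transaction;
   void Add(MiniItem *I) { Transaction.push_back(I); }
   virtual ~MiniTransaction() {}
};

struct MiniItem {
   MiniTransaction *TransactionManager;
   explicit MiniItem(MiniTransaction *Manager) : TransactionManager(Manager) {}
   virtual ~MiniItem() {}
};

// The meta index is both an item and, by default, the transaction it runs in.
struct MiniMetaIndex : public MiniTransaction, public MiniItem {
   explicit MiniMetaIndex(MiniTransaction *Manager) : MiniItem(Manager)
   {
      if (TransactionManager == 0)      // started stand-alone: manage ourselves
      {
         TransactionManager = this;
         TransactionManager->Add(this);
      }
   }
};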
*/ string pkgAcqMetaIndex::Custom600Headers() const { - string Final = _config->FindDir("Dir::State::lists"); - Final += URItoFileName(RealURI); - - struct stat Buf; - if (stat(Final.c_str(),&Buf) != 0) - return "\nIndex-File: true"; - - return "\nIndex-File: true\nLast-Modified: " + TimeRFC1123(Buf.st_mtime); + return GetCustom600Headers(RealURI); } /*}}}*/ -void pkgAcqMetaIndex::Done(string Message,unsigned long long Size,HashStringList const &Hashes, /*{{{*/ +void pkgAcqMetaIndex::Done(string Message,unsigned long long Size, /*{{{*/ + HashStringList const &Hashes, pkgAcquire::MethodConfig *Cfg) { Item::Done(Message,Size,Hashes,Cfg); - // MetaIndexes are done in two passes: one to download the - // metaindex with an appropriate method, and a second to verify it - // with the gpgv method - - if (AuthPass == true) + if(CheckDownloadDone(Message, RealURI)) { - AuthDone(Message); + // we have a Release file, now download the Signature, all further + // verify/queue for additional downloads will be done in the + // pkgAcqMetaSig::Done() code + std::string MetaIndexFile = DestFile; + new pkgAcqMetaSig(Owner, TransactionManager, + MetaIndexSigURI, MetaIndexSigURIDesc, + MetaIndexSigShortDesc, MetaIndexFile, IndexTargets, + MetaIndexParser); - // all cool, move Release file into place - Complete = true; + string FinalFile = _config->FindDir("Dir::State::lists"); + FinalFile += URItoFileName(RealURI); + TransactionManager->TransactionStageCopy(this, DestFile, FinalFile); } - else - { - RetrievalDone(Message); - if (!Complete) - // Still more retrieving to do - return; +} + /*}}}*/ +bool pkgAcqMetaBase::CheckAuthDone(string Message, const string &RealURI) /*{{{*/ +{ + // At this point, the gpgv method has succeeded, so there is a + // valid signature from a key in the trusted keyring. 
We + // perform additional verification of its contents, and use them + // to verify the indexes we are about to download - if (SigFile == "") - { - // load indexes, the signature will downloaded afterwards - MetaIndexParser->Load(DestFile); - QueueIndexes(true); - } - else - { - // There was a signature file, so pass it to gpgv for - // verification - if (_config->FindB("Debug::pkgAcquire::Auth", false)) - std::cerr << "Metaindex acquired, queueing gpg verification (" - << SigFile << "," << DestFile << ")\n"; - AuthPass = true; - Desc.URI = "gpgv:" + SigFile; - QueueURI(Desc); - Mode = "gpgv"; - return; - } + if (!MetaIndexParser->Load(DestFile)) + { + Status = StatAuthError; + ErrorText = MetaIndexParser->ErrorText; + return false; } - if (Complete == true) + if (!VerifyVendor(Message, RealURI)) { - string FinalFile = _config->FindDir("Dir::State::lists"); - FinalFile += URItoFileName(RealURI); - if (SigFile == DestFile) - SigFile = FinalFile; - // queue for copy in place - PartialFile = DestFile; - DestFile = FinalFile; + return false; } + + if (_config->FindB("Debug::pkgAcquire::Auth", false)) + std::cerr << "Signature verification succeeded: " + << DestFile << std::endl; + + // Download further indexes with verification + // + // it would be really nice if we could simply do + // if (IMSHit == false) QueueIndexes(true) + // and skip the download if the Release file has not changed + // - but right now the list cleaner will need to be tricked + // to not delete all our packages/source indexes in this case + QueueIndexes(true); + + return true; +} + /*}}}*/ +// pkgAcqMetaBase::GetCustom600Headers - Get header for AcqMetaBase /*{{{*/ +// --------------------------------------------------------------------- +string pkgAcqMetaBase::GetCustom600Headers(const string &RealURI) const +{ + std::string Header = "\nIndex-File: true"; + std::string MaximumSize; + strprintf(MaximumSize, "\nMaximum-Size: %i", + _config->FindI("Acquire::MaxReleaseFileSize", 10*1000*1000)); + Header += MaximumSize; + + string FinalFile = _config->FindDir("Dir::State::lists"); + FinalFile += URItoFileName(RealURI); + + struct stat Buf; + if (stat(FinalFile.c_str(),&Buf) == 0) + Header += "\nLast-Modified: " + TimeRFC1123(Buf.st_mtime); + + return Header; } /*}}}*/ -void pkgAcqMetaIndex::RetrievalDone(string Message) /*{{{*/ +// pkgAcqMetaBase::QueueForSignatureVerify /*{{{*/ +void pkgAcqMetaBase::QueueForSignatureVerify(const std::string &MetaIndexFile, + const std::string &MetaIndexFileSignature) +{ + AuthPass = true; + Desc.URI = "gpgv:" + MetaIndexFileSignature; + DestFile = MetaIndexFile; + QueueURI(Desc); + SetActiveSubprocess("gpgv"); +} + /*}}}*/ +// pkgAcqMetaBase::CheckDownloadDone /*{{{*/ +bool pkgAcqMetaBase::CheckDownloadDone(const std::string &Message, + const std::string &RealURI) { // We have just finished downloading a Release file (it is not // verified yet) @@ -1669,7 +1950,7 @@ void pkgAcqMetaIndex::RetrievalDone(string Message) /*{{{*/ { Status = StatError; ErrorText = "Method gave a blank filename"; - return; + return false; } if (FileName != DestFile) @@ -1677,7 +1958,7 @@ void pkgAcqMetaIndex::RetrievalDone(string Message) /*{{{*/ Local = true; Desc.URI = "copy:" + FileName; QueueURI(Desc); - return; + return false; } // make sure to verify against the right file on I-M-S hit @@ -1686,70 +1967,16 @@ void pkgAcqMetaIndex::RetrievalDone(string Message) /*{{{*/ { string FinalFile = _config->FindDir("Dir::State::lists"); FinalFile += URItoFileName(RealURI); - if (SigFile == DestFile) - { - SigFile =
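CheckAuthDone and GetCustom600Headers above are the two places where earlier state feeds back into the next request: the verified Release data gates which indexes may be queued, and the previously stored copy supplies a Last-Modified date plus a Maximum-Size cap for the follow-up download. A self-contained approximation of the header construction; TimeRFC1123 is apt's own formatter, so strftime stands in for it here, and the size limit is a parameter instead of the Acquire::MaxReleaseFileSize lookup:

#include <cstdio>
#include <ctime>
#include <string>
#include <sys/stat.h>

std::string ReleaseRequestHeaders(std::string const &FinalFile, long long MaximumSize)
{
   std::string Header = "\nIndex-File: true";

   char Size[64];
   std::snprintf(Size, sizeof(Size), "\nMaximum-Size: %lld", MaximumSize);
   Header += Size;

   struct stat Buf;
   if (stat(FinalFile.c_str(), &Buf) == 0)      // an older copy exists in lists/
   {
      time_t ModTime = Buf.st_mtime;
      struct tm const * const Parts = std::gmtime(&ModTime);
      char Date[64];
      if (Parts != 0 &&
          std::strftime(Date, sizeof(Date), "%a, %d %b %Y %H:%M:%S GMT", Parts) != 0)
         Header += std::string("\nLast-Modified: ") + Date;
   }
   return Header;
}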
FinalFile; -#if 0 - // constructor of pkgAcqMetaClearSig moved it out of the way, - // now move it back in on IMS hit for the 'old' file - string const OldClearSig = DestFile + ".reverify"; - if (RealFileExists(OldClearSig) == true) - Rename(OldClearSig, FinalFile); -#endif - } DestFile = FinalFile; } - // queue a signature - if(SigFile != DestFile) - new pkgAcqMetaSig(Owner, TransactionID, - MetaIndexSigURI, MetaIndexSigURIDesc, - MetaIndexSigShortDesc, DestFile, IndexTargets, - MetaIndexParser); - + // set Item to complete as the remaining work is all local (verify etc) Complete = true; -} - /*}}}*/ -void pkgAcqMetaIndex::AuthDone(string Message) /*{{{*/ -{ - // At this point, the gpgv method has succeeded, so there is a - // valid signature from a key in the trusted keyring. We - // perform additional verification of its contents, and use them - // to verify the indexes we are about to download - - if (!MetaIndexParser->Load(DestFile)) - { - Status = StatAuthError; - ErrorText = MetaIndexParser->ErrorText; - return; - } - - if (!VerifyVendor(Message)) - { - return; - } - - if (_config->FindB("Debug::pkgAcquire::Auth", false)) - std::cerr << "Signature verification succeeded: " - << DestFile << std::endl; - - // Download further indexes with verification - QueueIndexes(true); - -#if 0 - // is it a clearsigned MetaIndex file? - if (DestFile == SigFile) - return; - // Done, move signature file into position - string VerifiedSigFile = _config->FindDir("Dir::State::lists") + - URItoFileName(RealURI) + ".gpg"; - Rename(SigFile,VerifiedSigFile); - chmod(VerifiedSigFile.c_str(),0644); -#endif + return true; } /*}}}*/ -void pkgAcqMetaIndex::QueueIndexes(bool verify) /*{{{*/ +void pkgAcqMetaBase::QueueIndexes(bool verify) /*{{{*/ { bool transInRelease = false; { @@ -1812,17 +2039,13 @@ void pkgAcqMetaIndex::QueueIndexes(bool verify) /*{{{*/ if ((*Target)->IsOptional() == true) { - if ((*Target)->IsSubIndex() == true) - new pkgAcqSubIndex(Owner, TransactionID, - (*Target)->URI, (*Target)->Description, - (*Target)->ShortDesc, ExpectedIndexHashes); - else if (transInRelease == false || Record != NULL || compressedAvailable == true) + if (transInRelease == false || Record != NULL || compressedAvailable == true) { if (_config->FindB("Acquire::PDiffs",true) == true && transInRelease == true && MetaIndexParser->Exists((*Target)->MetaKey + ".diff/Index") == true) - new pkgAcqDiffIndex(Owner, TransactionID, *Target, ExpectedIndexHashes, MetaIndexParser); + new pkgAcqDiffIndex(Owner, TransactionManager, *Target, ExpectedIndexHashes, MetaIndexParser); else - new pkgAcqIndexTrans(Owner, TransactionID, *Target, ExpectedIndexHashes, MetaIndexParser); + new pkgAcqIndexTrans(Owner, TransactionManager, *Target, ExpectedIndexHashes, MetaIndexParser); } continue; } @@ -1833,13 +2056,13 @@ void pkgAcqMetaIndex::QueueIndexes(bool verify) /*{{{*/ instead, but passing the required info to it is to much hassle */ if(_config->FindB("Acquire::PDiffs",true) == true && (verify == false || MetaIndexParser->Exists((*Target)->MetaKey + ".diff/Index") == true)) - new pkgAcqDiffIndex(Owner, TransactionID, *Target, ExpectedIndexHashes, MetaIndexParser); + new pkgAcqDiffIndex(Owner, TransactionManager, *Target, ExpectedIndexHashes, MetaIndexParser); else - new pkgAcqIndex(Owner, TransactionID, *Target, ExpectedIndexHashes, MetaIndexParser); + new pkgAcqIndex(Owner, TransactionManager, *Target, ExpectedIndexHashes, MetaIndexParser); } } /*}}}*/ -bool pkgAcqMetaIndex::VerifyVendor(string Message) /*{{{*/ +bool 
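QueueIndexes (context above) has to decide per target whether the pdiff machinery is even worth trying: patches are only attempted when Acquire::PDiffs is enabled and, if the Release file can be used for verification, when it actually lists a MetaKey.diff/Index entry. The selection is a one-liner once the inputs are gathered; ReleaseEntries stands in for MetaIndexParser->Exists():

#include <set>
#include <string>

// true: fetch via the diff/Index path, false: fetch the full index.
bool UsePatchDownload(std::string const &MetaKey, bool Verify, bool PDiffsEnabled,
                      std::set<std::string> const &ReleaseEntries)
{
   if (PDiffsEnabled == false)
      return false;                                   // Acquire::PDiffs is off
   if (Verify == false)
      return true;                                    // nothing to cross-check against
   return ReleaseEntries.count(MetaKey + ".diff/Index") != 0;
}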
pkgAcqMetaBase::VerifyVendor(string Message, const string &RealURI)/*{{{*/ { string::size_type pos; @@ -1916,119 +2139,80 @@ bool pkgAcqMetaIndex::VerifyVendor(string Message) /*{{{*/ return true; } /*}}}*/ -// pkgAcqMetaIndex::Failed - no Release file present or no signature file present /*{{{*/ -// --------------------------------------------------------------------- -/* */ +// pkgAcqMetaIndex::Failed - no Release file present /*{{{*/ void pkgAcqMetaIndex::Failed(string Message, - pkgAcquire::MethodConfig * /*Cnf*/) + pkgAcquire::MethodConfig * Cnf) { - string Final = _config->FindDir("Dir::State::lists") + URItoFileName(RealURI); + pkgAcquire::Item::Failed(Message, Cnf); + Status = StatDone; - if (AuthPass == true) - { - bool Stop = GenerateAuthWarning(RealURI, Message); - if(Stop) - return; - } + string FinalFile = _config->FindDir("Dir::State::lists") + URItoFileName(RealURI); - /* Always move the meta index, even if gpgv failed. This ensures - * that PackageFile objects are correctly filled in */ - if (FileExists(DestFile)) { - string FinalFile = _config->FindDir("Dir::State::lists"); - FinalFile += URItoFileName(RealURI); - /* InRelease files become Release files, otherwise - * they would be considered as trusted later on */ - if (SigFile == DestFile) { - RealURI = RealURI.replace(RealURI.rfind("InRelease"), 9, - "Release"); - FinalFile = FinalFile.replace(FinalFile.rfind("InRelease"), 9, - "Release"); - SigFile = FinalFile; - } + _error->Warning(_("The repository '%s' does not have a Release file. " + "This is deprecated, please contact the owner of the " + "repository."), URIDesc.c_str()); + // No Release file was present so fall + // back to queueing Packages files without verification + // only allow going further if the user explicitly wants it + if(MetaIndexParser->IsAlwaysTrusted() || _config->FindB("Acquire::AllowInsecureRepositories") == true) + { // Done, queue for rename on transaction finished - PartialFile = DestFile; - DestFile = FinalFile; - } + if (FileExists(DestFile)) + TransactionManager->TransactionStageCopy(this, DestFile, FinalFile); - // No Release file was present, or verification failed, so fall - // back to queueing Packages files without verification - QueueIndexes(false); + // queue without any kind of hashsum support + QueueIndexes(false); + } else { + // warn if the repository is unsigned + _error->Error("Use --allow-insecure-repositories to force the update"); + TransactionManager->AbortTransaction(); + Status = StatError; + return; + } } /*}}}*/ - -void pkgAcqMetaIndex::Finished() +void pkgAcqMetaIndex::Finished() /*{{{*/ { if(_config->FindB("Debug::Acquire::Transaction", false) == true) std::clog << "Finished: " << DestFile <<std::endl; - if(Owner->TransactionHasError(TransactionID) == false && - TransactionID > 0) - Owner->CommitTransaction(TransactionID); + if(TransactionManager != NULL && + TransactionManager->TransactionHasError() == false) + TransactionManager->CommitTransaction(); } - - + /*}}}*/ pkgAcqMetaClearSig::pkgAcqMetaClearSig(pkgAcquire *Owner, /*{{{*/ string const &URI, string const &URIDesc, string const &ShortDesc, string const &MetaIndexURI, string const &MetaIndexURIDesc, string const &MetaIndexShortDesc, string const &MetaSigURI, string const &MetaSigURIDesc, string const &MetaSigShortDesc, const vector* IndexTargets, indexRecords* MetaIndexParser) : - pkgAcqMetaIndex(Owner, (unsigned long)this, URI, URIDesc, ShortDesc, MetaSigURI, MetaSigURIDesc,MetaSigShortDesc, IndexTargets, MetaIndexParser), + pkgAcqMetaIndex(Owner, NULL, URI, URIDesc, ShortDesc,
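Finished() above, together with TransactionHasError(), is what makes the update all-or-nothing: nothing is moved out of partial/ unless every item of the transaction ended up done or idle. A reduced version of the error check, with the item states collapsed into a plain enum:

#include <vector>

enum MiniStatus { MiniStatIdle, MiniStatDone, MiniStatError, MiniStatAuthError };

// true as soon as a single member of the transaction is in a failure state;
// the commit is only performed when this returns false.
bool TransactionHasError(std::vector<MiniStatus> const &Items)
{
   for (std::vector<MiniStatus>::const_iterator I = Items.begin(); I != Items.end(); ++I)
      if (*I != MiniStatDone && *I != MiniStatIdle)
         return true;
   return false;
}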
MetaSigURI, MetaSigURIDesc,MetaSigShortDesc, IndexTargets, MetaIndexParser), MetaIndexURI(MetaIndexURI), MetaIndexURIDesc(MetaIndexURIDesc), MetaIndexShortDesc(MetaIndexShortDesc), MetaSigURI(MetaSigURI), MetaSigURIDesc(MetaSigURIDesc), MetaSigShortDesc(MetaSigShortDesc) { - SigFile = DestFile; - // index targets + (worst case:) Release/Release.gpg ExpectedAdditionalItems = IndexTargets->size() + 2; -#if 0 - // keep the old InRelease around in case of transistent network errors - string const Final = _config->FindDir("Dir::State::lists") + URItoFileName(RealURI); - if (RealFileExists(Final) == true) - { - string const LastGoodSig = DestFile + ".reverify"; - Rename(Final,LastGoodSig); - } -#endif } /*}}}*/ pkgAcqMetaClearSig::~pkgAcqMetaClearSig() /*{{{*/ { -#if 0 - // if the file was never queued undo file-changes done in the constructor - if (QueueCounter == 1 && Status == StatIdle && FileSize == 0 && Complete == false) - { - string const Final = _config->FindDir("Dir::State::lists") + URItoFileName(RealURI); - string const LastGoodSig = DestFile + ".reverify"; - if (RealFileExists(Final) == false && RealFileExists(LastGoodSig) == true) - Rename(LastGoodSig, Final); - } -#endif } /*}}}*/ // pkgAcqMetaClearSig::Custom600Headers - Insert custom request headers /*{{{*/ // --------------------------------------------------------------------- -// FIXME: this can go away once the InRelease file is used widely string pkgAcqMetaClearSig::Custom600Headers() const { - string Final = _config->FindDir("Dir::State::lists"); - Final += URItoFileName(RealURI); - - struct stat Buf; - if (stat(Final.c_str(),&Buf) != 0) - { - if (stat(Final.c_str(),&Buf) != 0) - return "\nIndex-File: true\nFail-Ignore: true\n"; - } - - return "\nIndex-File: true\nFail-Ignore: true\nLast-Modified: " + TimeRFC1123(Buf.st_mtime); + string Header = GetCustom600Headers(RealURI); + Header += "\nFail-Ignore: true"; + return Header; } /*}}}*/ // pkgAcqMetaClearSig::Done - We got a file /*{{{*/ // --------------------------------------------------------------------- -void pkgAcqMetaClearSig::Done(std::string Message,unsigned long long Size, - HashStringList const &Hashes, +void pkgAcqMetaClearSig::Done(std::string Message,unsigned long long /*Size*/, + HashStringList const &/*Hashes*/, pkgAcquire::MethodConfig *Cnf) { // if we expect a ClearTextSignature (InRelase), ensure that @@ -2037,14 +2221,34 @@ void pkgAcqMetaClearSig::Done(std::string Message,unsigned long long Size, if (FileExists(DestFile) && !StartsWithGPGClearTextSignature(DestFile)) { pkgAcquire::Item::Failed(Message, Cnf); - ErrorText = _("Does not start with a cleartext signature"); + RenameOnError(NotClearsigned); + TransactionManager->AbortTransaction(); + return; + } + + if(AuthPass == false) + { + if(CheckDownloadDone(Message, RealURI) == true) + QueueForSignatureVerify(DestFile, DestFile); return; } - pkgAcqMetaIndex::Done(Message, Size, Hashes, Cnf); + else + { + if(CheckAuthDone(Message, RealURI) == true) + { + string FinalFile = _config->FindDir("Dir::State::lists"); + FinalFile += URItoFileName(RealURI); + + // queue for copy in place + TransactionManager->TransactionStageCopy(this, DestFile, FinalFile); + } + } } /*}}}*/ void pkgAcqMetaClearSig::Failed(string Message,pkgAcquire::MethodConfig *Cnf) /*{{{*/ { + Item::Failed(Message, Cnf); + // we failed, we will not get additional items from this method ExpectedAdditionalItems = 0; @@ -2055,19 +2259,54 @@ void pkgAcqMetaClearSig::Failed(string Message,pkgAcquire::MethodConfig *Cnf) /* // impression 
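The clear-signed path above refuses to hand anything to gpgv unless the file at least looks like an inline signed document; apt uses its StartsWithGPGClearTextSignature() helper for that. A stand-alone equivalent of such a check (not apt's implementation) simply compares the first line against the OpenPGP armor header:

#include <fstream>
#include <string>

// Rough "starts with a cleartext signature" test: an inline signed InRelease
// file has to begin with this armor line.
bool LooksClearsigned(std::string const &Path)
{
   static const std::string Armor = "-----BEGIN PGP SIGNED MESSAGE-----";
   std::ifstream File(Path.c_str());
   std::string FirstLine;
   if (!std::getline(File, FirstLine))
      return false;                                   // unreadable or empty file
   // tolerate a trailing carriage return from mirrors serving DOS line endings
   while (!FirstLine.empty() && FirstLine[FirstLine.size() - 1] == '\r')
      FirstLine.erase(FirstLine.size() - 1);
   return FirstLine == Armor;
}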
(CVE-2012-0214) string FinalFile = _config->FindDir("Dir::State::lists"); FinalFile.append(URItoFileName(RealURI)); - PartialFile = ""; - DestFile = FinalFile; + TransactionManager->TransactionStageRemoval(this, FinalFile); + Status = StatDone; - new pkgAcqMetaIndex(Owner, TransactionID, + new pkgAcqMetaIndex(Owner, TransactionManager, MetaIndexURI, MetaIndexURIDesc, MetaIndexShortDesc, MetaSigURI, MetaSigURIDesc, MetaSigShortDesc, IndexTargets, MetaIndexParser); - if (Cnf->LocalOnly == true || - StringToBool(LookupTag(Message, "Transient-Failure"), false) == false) - Dequeue(); } else - pkgAcqMetaIndex::Failed(Message, Cnf); + { + if(CheckStopAuthentication(RealURI, Message)) + return; + + _error->Warning(_("The data from '%s' is not signed. Packages " + "from that repository cannot be authenticated."), + URIDesc.c_str()); + + // No Release file was present, or verification failed, so fall + // back to queueing Packages files without verification + // only allow going further if the user explicitly wants it + if(MetaIndexParser->IsAlwaysTrusted() || _config->FindB("Acquire::AllowInsecureRepositories") == true) + { + Status = StatDone; + + /* Always move the meta index, even if gpgv failed. This ensures + * that PackageFile objects are correctly filled in */ + if (FileExists(DestFile)) + { + string FinalFile = _config->FindDir("Dir::State::lists"); + FinalFile += URItoFileName(RealURI); + /* InRelease files become Release files, otherwise + * they would be considered as trusted later on */ + RealURI = RealURI.replace(RealURI.rfind("InRelease"), 9, + "Release"); + FinalFile = FinalFile.replace(FinalFile.rfind("InRelease"), 9, + "Release"); + + // Done, queue for rename on transaction finished + TransactionManager->TransactionStageCopy(this, DestFile, FinalFile); + } + QueueIndexes(false); + } else { + // warn if the repository is unsigned + _error->Error("Use --allow-insecure-repositories to force the update"); + TransactionManager->AbortTransaction(); + Status = StatError; + } + } } /*}}}*/ // AcqArchive::AcqArchive - Constructor /*{{{*/ @@ -2245,7 +2484,11 @@ bool pkgAcqArchive::QueueNext() if ((unsigned long long)Buf.st_size > Version->Size) unlink(DestFile.c_str()); else + { PartialSize = Buf.st_size; + std::string SandboxUser = _config->Find("APT::Sandbox::User"); + ChangeOwnerAndPermissionOfFile("pkgAcqArchive::QueueNext",DestFile.c_str(), SandboxUser.c_str(), "root", 0600); + } } // Disables download of archives - useful if no real installation follows, @@ -2301,21 +2544,20 @@ void pkgAcqArchive::Done(string Message,unsigned long long Size, HashStringList return; } - Complete = true; - // Reference filename if (FileName != DestFile) { StoreFilename = DestFile = FileName; Local = true; + Complete = true; return; } - + // Done, move it into position string FinalFile = _config->FindDir("Dir::Cache::Archives"); FinalFile += flNotDir(StoreFilename); Rename(DestFile,FinalFile); - + ChangeOwnerAndPermissionOfFile("pkgAcqArchive::Done", FinalFile.c_str(), "root", "root", 0644); StoreFilename = DestFile = FinalFile; Complete = true; } @@ -2411,7 +2653,11 @@ pkgAcqFile::pkgAcqFile(pkgAcquire *Owner,string URI, HashStringList const &Hashe if ((Size > 0) && (unsigned long long)Buf.st_size > Size) unlink(DestFile.c_str()); else + { PartialSize = Buf.st_size; + std::string SandboxUser = _config->Find("APT::Sandbox::User"); + ChangeOwnerAndPermissionOfFile("pkgAcqFile", DestFile.c_str(), SandboxUser.c_str(), "root", 0600); + } } QueueURI(Desc);
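Both pkgAcqArchive::QueueNext and the pkgAcqFile constructor above treat an existing partial file the same way: an oversized leftover is deleted, anything plausible seeds PartialSize for resuming and is handed to the configured APT::Sandbox::User so the unprivileged download method can append to it. A sketch of that preparation step, with the sandbox user passed in instead of read from _config and all errors reduced to "reuse nothing":

#include <cstdio>      // std::remove
#include <pwd.h>
#include <string>
#include <sys/stat.h>
#include <unistd.h>

// Prepare an existing partial download for resuming.
// Returns the number of bytes that may be reused (0 if none).
unsigned long long PreparePartialFile(std::string const &Path,
                                      unsigned long long ExpectedSize,
                                      std::string const &SandboxUser)
{
   struct stat Buf;
   if (stat(Path.c_str(), &Buf) != 0)
      return 0;                                        // nothing there yet
   if (ExpectedSize > 0 && (unsigned long long)Buf.st_size > ExpectedSize)
   {
      std::remove(Path.c_str());                       // larger than it should be: start over
      return 0;
   }
   // hand the file to the sandbox user so the download method may write to it;
   // chown only works as root, so a failure here is not fatal for the sketch
   struct passwd const * const pw = getpwnam(SandboxUser.c_str());
   if (getuid() == 0 && pw != 0)
      (void)chown(Path.c_str(), pw->pw_uid, (gid_t)-1);
   (void)chmod(Path.c_str(), 0600);
   return (unsigned long long)Buf.st_size;
}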