X-Git-Url: https://git.saurik.com/apt.git/blobdiff_plain/04d0fa8803bcd70437cc1033f333d0992b28a329..5404685b66b92f93da7ded5f8fe44fbabea38ba4:/apt-pkg/acquire.cc

diff --git a/apt-pkg/acquire.cc b/apt-pkg/acquire.cc
index cbd67055d..29362fb40 100644
--- a/apt-pkg/acquire.cc
+++ b/apt-pkg/acquire.cc
@@ -5,30 +5,47 @@
    Acquire - File Acquiration
 
-   The core element for the schedual system is the concept of a named
+   The core element for the schedule system is the concept of a named
    queue. Each queue is unique and each queue has a name derived from the
-   URI. The degree of paralization can be controled by how the queue
+   URI. The degree of paralization can be controlled by how the queue
    name is derived from the URI.
 
    ##################################################################### */
 									/*}}}*/
 // Include Files							/*{{{*/
+#include
+
 #include
 #include
 #include
 #include
 #include
 #include
+#include
-#include
-
+#include
+#include
+#include
+#include
 #include
 #include
-
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
 #include
 #include
+#include
 #include
 #include
+
+#include
 									/*}}}*/
 
 using namespace std;
@@ -36,32 +53,118 @@ using namespace std;
 // Acquire::pkgAcquire - Constructor					/*{{{*/
 // ---------------------------------------------------------------------
 /* We grab some runtime state from the configuration space */
-pkgAcquire::pkgAcquire(pkgAcquireStatus *Log) : Log(Log)
+pkgAcquire::pkgAcquire() : LockFD(-1), d(NULL), Queues(0), Workers(0), Configs(0), Log(NULL), ToFetch(0),
+			   Debug(_config->FindB("Debug::pkgAcquire",false)),
+			   Running(false)
 {
-   Queues = 0;
-   Configs = 0;
-   Workers = 0;
-   ToFetch = 0;
-   Running = false;
-
-   string Mode = _config->Find("Acquire::Queue-Mode","host");
+   Initialize();
+}
+pkgAcquire::pkgAcquire(pkgAcquireStatus *Progress) : LockFD(-1), d(NULL), Queues(0), Workers(0),
+			   Configs(0), Log(NULL), ToFetch(0),
+			   Debug(_config->FindB("Debug::pkgAcquire",false)),
+			   Running(false)
+{
+   Initialize();
+   SetLog(Progress);
+}
+void pkgAcquire::Initialize()
+{
+   string const Mode = _config->Find("Acquire::Queue-Mode","host");
    if (strcasecmp(Mode.c_str(),"host") == 0)
       QueueMode = QueueHost;
    if (strcasecmp(Mode.c_str(),"access") == 0)
-      QueueMode = QueueAccess;
+      QueueMode = QueueAccess;
 
-   Debug = _config->FindB("Debug::pkgAcquire",false);
-
-   // This is really a stupid place for this
-   struct stat St;
-   if (stat((_config->FindDir("Dir::State::lists") + "partial/").c_str(),&St) != 0 ||
-       S_ISDIR(St.st_mode) == 0)
-      _error->Error(_("Lists directory %spartial is missing."),
-		    _config->FindDir("Dir::State::lists").c_str());
-   if (stat((_config->FindDir("Dir::Cache::Archives") + "partial/").c_str(),&St) != 0 ||
-       S_ISDIR(St.st_mode) == 0)
-      _error->Error(_("Archive directory %spartial is missing."),
-		    _config->FindDir("Dir::Cache::Archives").c_str());
+   // chown the auth.conf file as it will be accessed by our methods
+   std::string const SandboxUser = _config->Find("APT::Sandbox::User");
+   if (getuid() == 0 && SandboxUser.empty() == false && SandboxUser != "root") // if we aren't root, we can't chown, so don't try it
+   {
+      struct passwd const * const pw = getpwnam(SandboxUser.c_str());
+      struct group const * const gr = getgrnam("root");
+      if (pw != NULL && gr != NULL)
+      {
+	 std::string const AuthConf = _config->FindFile("Dir::Etc::netrc");
+	 if(AuthConf.empty() == false && RealFileExists(AuthConf) &&
+	    chown(AuthConf.c_str(), pw->pw_uid, gr->gr_gid) != 0)
+	    _error->WarningE("SetupAPTPartialDirectory", "chown to %s:root of file %s failed", SandboxUser.c_str(), AuthConf.c_str());
+      }
+   }
+}
+									/*}}}*/
+// Acquire::GetLock - lock directory and prepare for action		/*{{{*/
+static bool SetupAPTPartialDirectory(std::string const &grand, std::string const &parent)
+{
+   std::string const partial = parent + "partial";
+   mode_t const mode = umask(S_IWGRP | S_IWOTH);
+   bool const creation_fail = (CreateAPTDirectoryIfNeeded(grand, partial) == false &&
+	 CreateAPTDirectoryIfNeeded(parent, partial) == false);
+   umask(mode);
+   if (creation_fail == true)
+      return false;
+
+   std::string const SandboxUser = _config->Find("APT::Sandbox::User");
+   if (getuid() == 0 && SandboxUser.empty() == false && SandboxUser != "root") // if we aren't root, we can't chown, so don't try it
+   {
+      struct passwd const * const pw = getpwnam(SandboxUser.c_str());
+      struct group const * const gr = getgrnam("root");
+      if (pw != NULL && gr != NULL)
+      {
+	 // chown the partial dir
+	 if(chown(partial.c_str(), pw->pw_uid, gr->gr_gid) != 0)
+	    _error->WarningE("SetupAPTPartialDirectory", "chown to %s:root of directory %s failed", SandboxUser.c_str(), partial.c_str());
+      }
+   }
+   if (chmod(partial.c_str(), 0700) != 0)
+      _error->WarningE("SetupAPTPartialDirectory", "chmod 0700 of directory %s failed", partial.c_str());
+
+   return true;
+}
+bool pkgAcquire::Setup(pkgAcquireStatus *Progress, string const &Lock)
+{
+   Log = Progress;
+   if (Lock.empty())
+   {
+      string const listDir = _config->FindDir("Dir::State::lists");
+      if (SetupAPTPartialDirectory(_config->FindDir("Dir::State"), listDir) == false)
+	 return _error->Errno("Acquire", _("List directory %spartial is missing."), listDir.c_str());
+      string const archivesDir = _config->FindDir("Dir::Cache::Archives");
+      if (SetupAPTPartialDirectory(_config->FindDir("Dir::Cache"), archivesDir) == false)
+	 return _error->Errno("Acquire", _("Archives directory %spartial is missing."), archivesDir.c_str());
+      return true;
+   }
+   return GetLock(Lock);
+}
+bool pkgAcquire::GetLock(std::string const &Lock)
+{
+   if (Lock.empty() == true)
+      return false;
+
+   // check for existence and possibly create auxiliary directories
+   string const listDir = _config->FindDir("Dir::State::lists");
+   string const archivesDir = _config->FindDir("Dir::Cache::Archives");
+
+   if (Lock == listDir)
+   {
+      if (SetupAPTPartialDirectory(_config->FindDir("Dir::State"), listDir) == false)
+	 return _error->Errno("Acquire", _("List directory %spartial is missing."), listDir.c_str());
+   }
+   if (Lock == archivesDir)
+   {
+      if (SetupAPTPartialDirectory(_config->FindDir("Dir::Cache"), archivesDir) == false)
+	 return _error->Errno("Acquire", _("Archives directory %spartial is missing."), archivesDir.c_str());
+   }
+
+   if (_config->FindB("Debug::NoLocking", false) == true)
+      return true;
+
+   // Lock the directory this acquire object will work in
+   if (LockFD != -1)
+      close(LockFD);
+   LockFD = ::GetLock(flCombine(Lock, "lock"));
+   if (LockFD == -1)
+      return _error->Error(_("Unable to lock directory %s"), Lock.c_str());
+
+   return true;
 }
 									/*}}}*/
 // Acquire::~pkgAcquire	- Destructor					/*{{{*/
@@ -70,7 +173,10 @@ pkgAcquire::pkgAcquire(pkgAcquireStatus *Log) : Log(Log)
 pkgAcquire::~pkgAcquire()
 {
    Shutdown();
-
+
+   if (LockFD != -1)
+      close(LockFD);
+
    while (Configs != 0)
    {
       MethodConfig *Jnk = Configs;
@@ -84,7 +190,7 @@ pkgAcquire::~pkgAcquire()
 /* */
 void pkgAcquire::Shutdown()
 {
-   while (Items.size() != 0)
+   while (Items.empty() == false)
    {
       if (Items[0]->Status == Item::StatFetching)
	 Items[0]->Status = Item::StatError;
@@ -123,7 +229,7 @@ void pkgAcquire::Remove(Item *Itm)
	 I = Items.begin();
       }
       else
-	 I++;
+	 ++I;
   }
 }
 									/*}}}*/
@@ -141,7 +247,7 @@ void pkgAcquire::Add(Worker *Work)
 // ---------------------------------------------------------------------
 /* A worker has died. This can not be done while the select loop is running
    as it would require that RunFds could handling a changing list state and
-   it cant.. */
+   it can't.. */
 void pkgAcquire::Remove(Worker *Work)
 {
    if (Running == true)
@@ -210,11 +316,19 @@ void pkgAcquire::Dequeue(Item *Itm)
 {
    Queue *I = Queues;
    bool Res = false;
-   for (; I != 0; I = I->Next)
-      Res |= I->Dequeue(Itm);
-
    if (Debug == true)
      clog << "Dequeuing " << Itm->DestFile << endl;
+
+   for (; I != 0; I = I->Next)
+   {
+      if (I->Dequeue(Itm))
+      {
+	 Res = true;
+	 if (Debug == true)
+	    clog << "Dequeued from " << I->Name << endl;
+      }
+   }
+
    if (Res == true)
      ToFetch--;
 }
@@ -235,9 +349,64 @@ string pkgAcquire::QueueName(string Uri,MethodConfig const *&Config)
    /* Single-Instance methods get exactly one queue per URI. This is
      also used for the Access queue method  */
    if (Config->SingleInstance == true || QueueMode == QueueAccess)
-      return U.Access;
+      return U.Access;
+
+   string AccessSchema = U.Access + ':';
+   string FullQueueName;
+
+   if (U.Host.empty())
+   {
+      long existing = 0;
+      // check how many queues exist already and reuse empty ones
+      for (Queue const *I = Queues; I != 0; I = I->Next)
+	 if (I->Name.compare(0, AccessSchema.length(), AccessSchema) == 0)
+	 {
+	    if (I->Items == nullptr)
+	       return I->Name;
+	    ++existing;
+	 }
+
+#ifdef _SC_NPROCESSORS_ONLN
+      long cpuCount = sysconf(_SC_NPROCESSORS_ONLN) * 2;
+#else
+      long cpuCount = 10;
+#endif
+      cpuCount = _config->FindI("Acquire::QueueHost::Limit", cpuCount);
+
+      if (cpuCount <= 0 || existing < cpuCount)
+	 strprintf(FullQueueName, "%s%ld", AccessSchema.c_str(), existing);
+      else
+      {
+	 long const randomQueue = random() % cpuCount;
+	 strprintf(FullQueueName, "%s%ld", AccessSchema.c_str(), randomQueue);
+      }
+
+      if (Debug)
+	 clog << "Chose random queue " << FullQueueName << " for " << Uri << endl;
+   } else
+   {
+      FullQueueName = AccessSchema + U.Host;
+   }
+   unsigned int Instances = 0, SchemaLength = AccessSchema.length();
+
+   Queue *I = Queues;
+   for (; I != 0; I = I->Next) {
+      // if the queue already exists, re-use it
+      if (I->Name == FullQueueName)
+	 return FullQueueName;
+
+      if (I->Name.compare(0, SchemaLength, AccessSchema) == 0)
+	 Instances++;
+   }
+
+   if (Debug) {
+      clog << "Found " << Instances << " instances of " << U.Access << endl;
+   }
-   return U.Access + ':' + U.Host;
+   if (Instances >= (unsigned int)_config->FindI("Acquire::QueueHost::Limit",10))
+      return U.Access;
+
+   return FullQueueName;
 }
 									/*}}}*/
 // Acquire::GetConfig - Fetch the configuration information		/*{{{*/
@@ -265,7 +434,7 @@ pkgAcquire::MethodConfig *pkgAcquire::GetConfig(string Access)
      return 0;
 
   /* if a method uses DownloadLimit, we switch to SingleInstance mode */
-   if(_config->FindI("Acquire::"+Access+"::DlLimit",0) > 0)
+   if(_config->FindI("Acquire::"+Access+"::Dl-Limit",0) > 0)
      Conf->SingleInstance = true;
 
   return Conf;
@@ -293,20 +462,30 @@ void pkgAcquire::SetFds(int &Fd,fd_set *RSet,fd_set *WSet)
   }
 }
 									/*}}}*/
-// Acquire::RunFds - Deal with active FDs				/*{{{*/
+// Acquire::RunFds - compatibility remove on next abi/api break		/*{{{*/
+void pkgAcquire::RunFds(fd_set *RSet,fd_set *WSet)
+{
+   RunFdsSane(RSet, WSet);
+}
+									/*}}}*/
+// Acquire::RunFdsSane - Deal with active FDs				/*{{{*/
 // ---------------------------------------------------------------------
 /* Dispatch active FDs over to the proper workers. It is very important
   that a worker never be erased while this is running! The queue class
   should never erase a worker except during shutdown processing. */
-void pkgAcquire::RunFds(fd_set *RSet,fd_set *WSet)
+bool pkgAcquire::RunFdsSane(fd_set *RSet,fd_set *WSet)
 {
+   bool Res = true;
+
   for (Worker *I = Workers; I != 0; I = I->NextAcquire)
   {
      if (I->InFd >= 0 && FD_ISSET(I->InFd,RSet) != 0)
-	 I->InFdReady();
+	 Res &= I->InFdReady();
      if (I->OutFd >= 0 && FD_ISSET(I->OutFd,WSet) != 0)
-	 I->OutFdReady();
+	 Res &= I->OutFdReady();
   }
+
+   return Res;
 }
 									/*}}}*/
 // Acquire::Run - Run the fetch sequence				/*{{{*/
@@ -314,8 +493,129 @@ void pkgAcquire::RunFds(fd_set *RSet,fd_set *WSet)
 /* This runs the queues. It manages a select loop for all of the
   Worker tasks. The workers interact with the queues and items to
   manage the actual fetch. */
+static bool IsAccessibleBySandboxUser(std::string const &filename, bool const ReadWrite)
+{
+   // you would think this is easily to answer with faccessat, right? Wrong!
+   // It e.g. gets groups wrong, so the only thing which works reliable is trying
+   // to open the file we want to open later on…
+   if (unlikely(filename.empty()))
+      return true;
+
+   if (ReadWrite == false)
+   {
+      errno = 0;
+      // can we read a file? Note that non-existing files are "fine"
+      int const fd = open(filename.c_str(), O_RDONLY | O_CLOEXEC);
+      if (fd == -1 && errno == EACCES)
+	 return false;
+      close(fd);
+      return true;
+   }
+   else
+   {
+      // the file might not exist yet and even if it does we will fix permissions,
+      // so important is here just that the directory it is in allows that
+      std::string const dirname = flNotFile(filename);
+      if (unlikely(dirname.empty()))
+	 return true;
+
+      char const * const filetag = ".apt-acquire-privs-test.XXXXXX";
+      std::string const tmpfile_tpl = flCombine(dirname, filetag);
+      std::unique_ptr tmpfile { strdup(tmpfile_tpl.c_str()), std::free };
+      int const fd = mkstemp(tmpfile.get());
+      if (fd == -1 && errno == EACCES)
+	 return false;
+      RemoveFile("IsAccessibleBySandboxUser", tmpfile.get());
+      close(fd);
+      return true;
+   }
+}
+static void CheckDropPrivsMustBeDisabled(pkgAcquire const &Fetcher)
+{
+   if(getuid() != 0)
+      return;
+
+   std::string const SandboxUser = _config->Find("APT::Sandbox::User");
+   if (SandboxUser.empty() || SandboxUser == "root")
+      return;
+
+   struct passwd const * const pw = getpwnam(SandboxUser.c_str());
+   if (pw == NULL)
+   {
+      _error->Warning(_("No sandbox user '%s' on the system, can not drop privileges"), SandboxUser.c_str());
+      _config->Set("APT::Sandbox::User", "");
+      return;
+   }
+
+   gid_t const old_euid = geteuid();
+   gid_t const old_egid = getegid();
+
+   long const ngroups_max = sysconf(_SC_NGROUPS_MAX);
+   std::unique_ptr old_gidlist(new gid_t[ngroups_max]);
+   if (unlikely(old_gidlist == NULL))
+      return;
+   ssize_t old_gidlist_nr;
+   if ((old_gidlist_nr = getgroups(ngroups_max, old_gidlist.get())) < 0)
+   {
+      _error->FatalE("getgroups", "getgroups %lu failed", ngroups_max);
+      old_gidlist[0] = 0;
+      old_gidlist_nr = 1;
+   }
+   if (setgroups(1, &pw->pw_gid))
+      _error->FatalE("setgroups", "setgroups %u failed", pw->pw_gid);
+
+   if (setegid(pw->pw_gid) != 0)
+      _error->FatalE("setegid", "setegid %u failed", pw->pw_gid);
+   if (seteuid(pw->pw_uid) != 0)
+      _error->FatalE("seteuid", "seteuid %u failed", pw->pw_uid);
+
+   for (pkgAcquire::ItemCIterator I = Fetcher.ItemsBegin();
+	I != Fetcher.ItemsEnd(); ++I)
+   {
+      // no need to drop privileges for a complete file
+      if ((*I)->Complete == true || (*I)->Status != pkgAcquire::Item::StatIdle)
+	 continue;
+
+      // if destination file is inaccessible all hope is lost for privilege dropping
+      if (IsAccessibleBySandboxUser((*I)->DestFile, true) == false)
+      {
+	 _error->WarningE("pkgAcquire::Run", _("Can't drop privileges for downloading as file '%s' couldn't be accessed by user '%s'."),
+	       (*I)->DestFile.c_str(), SandboxUser.c_str());
+	 _config->Set("APT::Sandbox::User", "");
+	 break;
+      }
+
+      // if its the source file (e.g. local sources) we might be lucky
+      // by dropping the dropping only for some methods.
+      URI const source = (*I)->DescURI();
+      if (source.Access == "file" || source.Access == "copy")
+      {
+	 std::string const conf = "Binary::" + source.Access + "::APT::Sandbox::User";
+	 if (_config->Exists(conf) == true)
+	    continue;
+
+	 if (IsAccessibleBySandboxUser(source.Path, false) == false)
+	 {
+	    _error->NoticeE("pkgAcquire::Run", _("Can't drop privileges for downloading as file '%s' couldn't be accessed by user '%s'."),
+		  source.Path.c_str(), SandboxUser.c_str());
+	    _config->CndSet("Binary::file::APT::Sandbox::User", "root");
+	    _config->CndSet("Binary::copy::APT::Sandbox::User", "root");
+	 }
+      }
+   }
+
+   if (seteuid(old_euid) != 0)
+      _error->FatalE("seteuid", "seteuid %u failed", old_euid);
+   if (setegid(old_egid) != 0)
+      _error->FatalE("setegid", "setegid %u failed", old_egid);
+   if (setgroups(old_gidlist_nr, old_gidlist.get()))
+      _error->FatalE("setgroups", "setgroups %u failed", 0);
+}
 pkgAcquire::RunResult pkgAcquire::Run(int PulseIntervall)
 {
+   _error->PushToStack();
+   CheckDropPrivsMustBeDisabled(*this);
+
   Running = true;
 
   for (Queue *I = Queues; I != 0; I = I->Next)
@@ -351,11 +651,10 @@ pkgAcquire::RunResult pkgAcquire::Run(int PulseIntervall)
	    _error->Errno("select","Select has failed");
	 break;
      }
-
-      RunFds(&RFds,&WFds);
-      if (_error->PendingError() == true)
-	 break;
-
+
+      if(RunFdsSane(&RFds,&WFds) == false)
+	 break;
+
      // Timeout, notify the log class
      if (Res == 0 || (Log != 0 && Log->Update == true))
      {
@@ -379,10 +678,12 @@ pkgAcquire::RunResult pkgAcquire::Run(int PulseIntervall)
	 I->Shutdown(false);
 
   // Shut down the items
-   for (ItemIterator I = Items.begin(); I != Items.end(); I++)
-      (*I)->Finished();
-
-   if (_error->PendingError())
+   for (ItemIterator I = Items.begin(); I != Items.end(); ++I)
+      (*I)->Finished();
+
+   bool const newError = _error->PendingError();
+   _error->MergeWithStack();
+   if (newError)
      return Failed;
   if (WasCancelled)
      return Cancelled;
@@ -405,7 +706,7 @@ void pkgAcquire::Bump()
 pkgAcquire::Worker *pkgAcquire::WorkerStep(Worker *I)
 {
   return I->NextAcquire;
-};
+}
 									/*}}}*/
 // Acquire::Clean - Cleans a directory					/*{{{*/
 // ---------------------------------------------------------------------
@@ -413,6 +714,13 @@ pkgAcquire::Worker *pkgAcquire::WorkerStep(Worker *I)
   if it is part of the download set. */
 bool pkgAcquire::Clean(string Dir)
 {
+   // non-existing directories are by definition clean…
+   if (DirectoryExists(Dir) == false)
+      return true;
+
+   if(Dir == "/")
+      return _error->Error(_("Clean of %s is not supported"), Dir.c_str());
+
   DIR *D = opendir(Dir.c_str());
   if (D == 0)
      return _error->Errno("opendir",_("Unable to read %s"),Dir.c_str());
@@ -429,61 +737,67 @@ bool pkgAcquire::Clean(string Dir)
      // Skip some files..
if (strcmp(Dir->d_name,"lock") == 0 || strcmp(Dir->d_name,"partial") == 0 || + strcmp(Dir->d_name,"lost+found") == 0 || strcmp(Dir->d_name,".") == 0 || strcmp(Dir->d_name,"..") == 0) continue; // Look in the get list ItemCIterator I = Items.begin(); - for (; I != Items.end(); I++) + for (; I != Items.end(); ++I) if (flNotDir((*I)->DestFile) == Dir->d_name) break; // Nothing found, nuke it if (I == Items.end()) - unlink(Dir->d_name); + RemoveFile("Clean", Dir->d_name); }; - chdir(StartDir.c_str()); closedir(D); + if (chdir(StartDir.c_str()) != 0) + return _error->Errno("chdir",_("Unable to change to %s"),StartDir.c_str()); return true; } /*}}}*/ // Acquire::TotalNeeded - Number of bytes to fetch /*{{{*/ // --------------------------------------------------------------------- /* This is the total number of bytes needed */ -double pkgAcquire::TotalNeeded() +APT_PURE unsigned long long pkgAcquire::TotalNeeded() { - double Total = 0; - for (ItemCIterator I = ItemsBegin(); I != ItemsEnd(); I++) - Total += (*I)->FileSize; - return Total; + return std::accumulate(ItemsBegin(), ItemsEnd(), 0llu, + [](unsigned long long const T, Item const * const I) { + return T + I->FileSize; + }); } /*}}}*/ // Acquire::FetchNeeded - Number of bytes needed to get /*{{{*/ // --------------------------------------------------------------------- /* This is the number of bytes that is not local */ -double pkgAcquire::FetchNeeded() +APT_PURE unsigned long long pkgAcquire::FetchNeeded() { - double Total = 0; - for (ItemCIterator I = ItemsBegin(); I != ItemsEnd(); I++) - if ((*I)->Local == false) - Total += (*I)->FileSize; - return Total; + return std::accumulate(ItemsBegin(), ItemsEnd(), 0llu, + [](unsigned long long const T, Item const * const I) { + if (I->Local == false) + return T + I->FileSize; + else + return T; + }); } /*}}}*/ // Acquire::PartialPresent - Number of partial bytes we already have /*{{{*/ // --------------------------------------------------------------------- /* This is the number of bytes that is not local */ -double pkgAcquire::PartialPresent() +APT_PURE unsigned long long pkgAcquire::PartialPresent() { - double Total = 0; - for (ItemCIterator I = ItemsBegin(); I != ItemsEnd(); I++) - if ((*I)->Local == false) - Total += (*I)->PartialSize; - return Total; + return std::accumulate(ItemsBegin(), ItemsEnd(), 0llu, + [](unsigned long long const T, Item const * const I) { + if (I->Local == false) + return T + I->PartialSize; + else + return T; + }); } - + /*}}}*/ // Acquire::UriBegin - Start iterator for the uri list /*{{{*/ // --------------------------------------------------------------------- /* */ @@ -500,32 +814,21 @@ pkgAcquire::UriIterator pkgAcquire::UriEnd() return UriIterator(0); } /*}}}*/ - // Acquire::MethodConfig::MethodConfig - Constructor /*{{{*/ // --------------------------------------------------------------------- /* */ -pkgAcquire::MethodConfig::MethodConfig() +pkgAcquire::MethodConfig::MethodConfig() : d(NULL), Next(0), SingleInstance(false), + Pipeline(false), SendConfig(false), LocalOnly(false), NeedsCleanup(false), + Removable(false) { - SingleInstance = false; - Pipeline = false; - SendConfig = false; - LocalOnly = false; - Removable = false; - Next = 0; } /*}}}*/ - // Queue::Queue - Constructor /*{{{*/ // --------------------------------------------------------------------- /* */ -pkgAcquire::Queue::Queue(string Name,pkgAcquire *Owner) : Name(Name), - Owner(Owner) +pkgAcquire::Queue::Queue(string const &name,pkgAcquire * const owner) : d(NULL), Next(0), + Name(name), 
Items(0), Workers(0), Owner(owner), PipeDepth(0), MaxPipeDepth(1) { - Items = 0; - Next = 0; - Workers = 0; - MaxPipeDepth = 1; - PipeDepth = 0; } /*}}}*/ // Queue::~Queue - Destructor /*{{{*/ @@ -551,9 +854,12 @@ bool pkgAcquire::Queue::Enqueue(ItemDesc &Item) QItem **I = &Items; // move to the end of the queue and check for duplicates here for (; *I != 0; I = &(*I)->Next) - if (Item.URI == (*I)->URI) + if (Item.URI == (*I)->URI) { - Item.Owner->Status = Item::StatDone; + if (_config->FindB("Debug::pkgAcquire::Worker",false) == true) + std::cerr << " @ Queue: Action combined for " << Item.URI << " and " << (*I)->URI << std::endl; + (*I)->Owners.push_back(Item.Owner); + Item.Owner->Status = (*I)->Owner->Status; return false; } @@ -576,13 +882,13 @@ bool pkgAcquire::Queue::Dequeue(Item *Owner) { if (Owner->Status == pkgAcquire::Item::StatFetching) return _error->Error("Tried to dequeue a fetching object"); - + bool Res = false; - + QItem **I = &Items; for (; *I != 0;) { - if ((*I)->Owner == Owner) + if (Owner == (*I)->Owner) { QItem *Jnk= *I; *I = (*I)->Next; @@ -593,7 +899,7 @@ bool pkgAcquire::Queue::Dequeue(Item *Owner) else I = &(*I)->Next; } - + return Res; } /*}}}*/ @@ -619,7 +925,7 @@ bool pkgAcquire::Queue::Startup() added other source retry to have cycle maintain a pipeline depth on its own. */ if (Cnf->Pipeline == true) - MaxPipeDepth = 10; + MaxPipeDepth = _config->FindI("Acquire::Max-Pipeline-Depth",10); else MaxPipeDepth = 1; } @@ -670,9 +976,12 @@ pkgAcquire::Queue::QItem *pkgAcquire::Queue::FindItem(string URI,pkgAcquire::Wor bool pkgAcquire::Queue::ItemDone(QItem *Itm) { PipeDepth--; - if (Itm->Owner->Status == pkgAcquire::Item::StatFetching) - Itm->Owner->Status = pkgAcquire::Item::StatDone; - + for (QItem::owner_iterator O = Itm->Owners.begin(); O != Itm->Owners.end(); ++O) + { + if ((*O)->Status == pkgAcquire::Item::StatFetching) + (*O)->Status = pkgAcquire::Item::StatDone; + } + if (Itm->Owner->QueueCounter <= 1) Owner->Dequeue(Itm->Owner); else @@ -680,7 +989,7 @@ bool pkgAcquire::Queue::ItemDone(QItem *Itm) Dequeue(Itm->Owner); Owner->Bump(); } - + return Cycle(); } /*}}}*/ @@ -695,7 +1004,7 @@ bool pkgAcquire::Queue::Cycle() if (PipeDepth < 0) return _error->Error("Pipedepth failure"); - + // Look for a queable item QItem *I = Items; while (PipeDepth < (signed)MaxPipeDepth) @@ -703,18 +1012,19 @@ bool pkgAcquire::Queue::Cycle() for (; I != 0; I = I->Next) if (I->Owner->Status == pkgAcquire::Item::StatIdle) break; - + // Nothing to do, queue is idle. if (I == 0) return true; - + I->Worker = Workers; - I->Owner->Status = pkgAcquire::Item::StatFetching; + for (auto const &O: I->Owners) + O->Status = pkgAcquire::Item::StatFetching; PipeDepth++; if (Workers->QueueItem(I) == false) return false; } - + return true; } /*}}}*/ @@ -726,11 +1036,98 @@ void pkgAcquire::Queue::Bump() Cycle(); } /*}}}*/ +HashStringList pkgAcquire::Queue::QItem::GetExpectedHashes() const /*{{{*/ +{ + /* each Item can have multiple owners and each owner might have different + hashes, even if that is unlikely in practice and if so at least some + owners will later fail. There is one situation through which is not a + failure and still needs this handling: Two owners who expect the same + file, but one owner only knows the SHA1 while the other only knows SHA256. 
*/ + HashStringList superhsl; + for (pkgAcquire::Queue::QItem::owner_iterator O = Owners.begin(); O != Owners.end(); ++O) + { + HashStringList const hsl = (*O)->GetExpectedHashes(); + if (hsl.usable() == false) + continue; + if (superhsl.usable() == false) + superhsl = hsl; + else + { + // we merge both lists - if we find disagreement send no hashes + HashStringList::const_iterator hs = hsl.begin(); + for (; hs != hsl.end(); ++hs) + if (superhsl.push_back(*hs) == false) + break; + if (hs != hsl.end()) + { + superhsl.clear(); + break; + } + } + } + return superhsl; +} + /*}}}*/ +APT_PURE unsigned long long pkgAcquire::Queue::QItem::GetMaximumSize() const /*{{{*/ +{ + unsigned long long Maximum = std::numeric_limits::max(); + for (auto const &O: Owners) + { + if (O->FileSize == 0) + continue; + Maximum = std::min(Maximum, O->FileSize); + } + if (Maximum == std::numeric_limits::max()) + return 0; + return Maximum; +} + /*}}}*/ +void pkgAcquire::Queue::QItem::SyncDestinationFiles() const /*{{{*/ +{ + /* ensure that the first owner has the best partial file of all and + the rest have (potentially dangling) symlinks to it so that + everything (like progress reporting) finds it easily */ + std::string superfile = Owner->DestFile; + off_t supersize = 0; + for (pkgAcquire::Queue::QItem::owner_iterator O = Owners.begin(); O != Owners.end(); ++O) + { + if ((*O)->DestFile == superfile) + continue; + struct stat file; + if (lstat((*O)->DestFile.c_str(),&file) == 0) + { + if ((file.st_mode & S_IFREG) == 0) + RemoveFile("SyncDestinationFiles", (*O)->DestFile); + else if (supersize < file.st_size) + { + supersize = file.st_size; + RemoveFile("SyncDestinationFiles", superfile); + rename((*O)->DestFile.c_str(), superfile.c_str()); + } + else + RemoveFile("SyncDestinationFiles", (*O)->DestFile); + if (symlink(superfile.c_str(), (*O)->DestFile.c_str()) != 0) + { + ; // not a problem per-se and no real alternative + } + } + } +} + /*}}}*/ +std::string pkgAcquire::Queue::QItem::Custom600Headers() const /*{{{*/ +{ + /* The others are relatively easy to merge, but this one? + Lets not merge and see how far we can run with it… + Likely, nobody will ever notice as all the items will + be of the same class and hence generate the same headers. */ + return Owner->Custom600Headers(); +} + /*}}}*/ // AcquireStatus::pkgAcquireStatus - Constructor /*{{{*/ // --------------------------------------------------------------------- /* */ -pkgAcquireStatus::pkgAcquireStatus() : Update(true), MorePulses(false) +pkgAcquireStatus::pkgAcquireStatus() : d(NULL), Percent(-1), Update(true), MorePulses(false) { Start(); } @@ -746,42 +1143,46 @@ bool pkgAcquireStatus::Pulse(pkgAcquire *Owner) CurrentBytes = 0; TotalItems = 0; CurrentItems = 0; - + // Compute the total number of bytes to fetch unsigned int Unknown = 0; unsigned int Count = 0; - for (pkgAcquire::ItemCIterator I = Owner->ItemsBegin(); I != Owner->ItemsEnd(); - I++, Count++) + bool ExpectAdditionalItems = false; + for (pkgAcquire::ItemCIterator I = Owner->ItemsBegin(); + I != Owner->ItemsEnd(); + ++I, ++Count) { TotalItems++; if ((*I)->Status == pkgAcquire::Item::StatDone) - CurrentItems++; - - // Totally ignore local items - if ((*I)->Local == true) - continue; + ++CurrentItems; + + // do we expect to acquire more files than we know of yet? 
+      if ((*I)->ExpectedAdditionalItems > 0)
+	 ExpectAdditionalItems = true;
 
      TotalBytes += (*I)->FileSize;
     if ((*I)->Complete == true)
	 CurrentBytes += (*I)->FileSize;
     if ((*I)->FileSize == 0 && (*I)->Complete == false)
-	 Unknown++;
+	 ++Unknown;
   }
-
+
   // Compute the current completion
-   unsigned long ResumeSize = 0;
+   unsigned long long ResumeSize = 0;
   for (pkgAcquire::Worker *I = Owner->WorkersBegin(); I != 0;
	I = Owner->WorkerStep(I))
+   {
     if (I->CurrentItem != 0 && I->CurrentItem->Owner->Complete == false)
     {
	 CurrentBytes += I->CurrentSize;
	 ResumeSize += I->ResumePoint;
-
+
	 // Files with unknown size always have 100% completion
-	 if (I->CurrentItem->Owner->FileSize == 0 &&
+	 if (I->CurrentItem->Owner->FileSize == 0 &&
	     I->CurrentItem->Owner->Complete == false)
	    TotalBytes += I->CurrentSize;
     }
+   }
 
   // Normalize the figures and account for unknown size downloads
   if (TotalBytes <= 0)
@@ -792,11 +1193,11 @@ bool pkgAcquireStatus::Pulse(pkgAcquire *Owner)
   // Wha?! Is not supposed to happen.
   if (CurrentBytes > TotalBytes)
      CurrentBytes = TotalBytes;
-
+
   // Compute the CPS
   struct timeval NewTime;
   gettimeofday(&NewTime,0);
-   if (NewTime.tv_sec - Time.tv_sec == 6 && NewTime.tv_usec > Time.tv_usec ||
+   if ((NewTime.tv_sec - Time.tv_sec == 6 && NewTime.tv_usec > Time.tv_usec) ||
       NewTime.tv_sec - Time.tv_sec > 6)
   {
      double Delta = NewTime.tv_sec - Time.tv_sec +
@@ -808,10 +1209,37 @@ bool pkgAcquireStatus::Pulse(pkgAcquire *Owner)
     else
	 CurrentCPS = ((CurrentBytes - ResumeSize) - LastBytes)/Delta;
     LastBytes = CurrentBytes - ResumeSize;
-      ElapsedTime = (unsigned long)Delta;
+      ElapsedTime = (unsigned long long)Delta;
     Time = NewTime;
   }
 
+   double const OldPercent = Percent;
+   // calculate the percentage, if we have too little data assume 1%
+   if (ExpectAdditionalItems)
+      Percent = 0;
+   else
+      // use both files and bytes because bytes can be unreliable
+      Percent = (0.8 * (CurrentBytes/float(TotalBytes)*100.0) +
+                 0.2 * (CurrentItems/float(TotalItems)*100.0));
+
+   // debug
+   if (_config->FindB("Debug::acquire::progress", false) == true)
+   {
+      std::clog
+         << "["
+         << std::setw(5) << std::setprecision(4) << std::showpoint << Percent
+         << "]"
+         << " Bytes: "
+         << SizeToStr(CurrentBytes) << " / " << SizeToStr(TotalBytes)
+         << " # Files: "
+         << CurrentItems << " / " << TotalItems
+         << std::endl;
+   }
+
+   double const DiffPercent = Percent - OldPercent;
+   if (DiffPercent < 0.001 && _config->FindB("Acquire::Progress::Diffpercent", false) == true)
+      return true;
+
   int fd = _config->FindI("APT::Status-Fd",-1);
   if(fd > 0)
   {
@@ -819,23 +1247,20 @@ bool pkgAcquireStatus::Pulse(pkgAcquire *Owner)
      char msg[200];
     long i = CurrentItems < TotalItems ? CurrentItems + 1 : CurrentItems;
-      unsigned long ETA =
-	 (unsigned long)((TotalBytes - CurrentBytes) / CurrentCPS);
+      unsigned long long ETA = 0;
+      if(CurrentCPS > 0)
+         ETA = (TotalBytes - CurrentBytes) / CurrentCPS;
 
     // only show the ETA if it makes sense
     if (ETA > 0 && ETA < 172800 /* two days */ )
	 snprintf(msg,sizeof(msg), _("Retrieving file %li of %li (%s remaining)"), i, TotalItems, TimeToStr(ETA).c_str());
     else
	 snprintf(msg,sizeof(msg), _("Retrieving file %li of %li"), i, TotalItems);
-
-      // build the status str
-      status << "dlstatus:" << i
-	     << ":" << (CurrentBytes/float(TotalBytes)*100.0)
-	     << ":" << msg
-	     << endl;
-      write(fd, status.str().c_str(), status.str().size());
+      std::string dlstatus;
+      strprintf(dlstatus, "dlstatus:%ld:%.4f:%s\n", i, Percent, msg);
+      FileFd::Write(fd, dlstatus.data(), dlstatus.size());
   }
 
   return true;
@@ -876,14 +1301,27 @@ void pkgAcquireStatus::Stop()
   else
      CurrentCPS = FetchedBytes/Delta;
   LastBytes = CurrentBytes;
-   ElapsedTime = (unsigned int)Delta;
+   ElapsedTime = (unsigned long long)Delta;
 }
 									/*}}}*/
 // AcquireStatus::Fetched - Called when a byte set has been fetched	/*{{{*/
 // ---------------------------------------------------------------------
 /* This is used to get accurate final transfer rate reporting. */
-void pkgAcquireStatus::Fetched(unsigned long Size,unsigned long Resume)
+void pkgAcquireStatus::Fetched(unsigned long long Size,unsigned long long Resume)
 {
   FetchedBytes += Size - Resume;
 }
 									/*}}}*/
+
+pkgAcquire::UriIterator::UriIterator(pkgAcquire::Queue *Q) : d(NULL), CurQ(Q), CurItem(0)
+{
+   while (CurItem == 0 && CurQ != 0)
+   {
+      CurItem = CurQ->Items;
+      CurQ = CurQ->Next;
+   }
+}
+
+APT_CONST pkgAcquire::UriIterator::~UriIterator() {}
+APT_CONST pkgAcquire::MethodConfig::~MethodConfig() {}
+APT_CONST pkgAcquireStatus::~pkgAcquireStatus() {}
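
The reworked QueueName() hunk above keeps one queue per access method and host for host-based URIs, but spreads hostless URIs (such as cdrom:) over numbered per-access queues, capped by Acquire::QueueHost::Limit, which defaults to twice the number of online CPUs. A rough standalone sketch of that naming idea follows; deriveQueueName and kQueueLimit are hypothetical illustration names, not apt's API.

// Illustrative sketch only, not apt's implementation: derive a download-queue
// name from a URI's access method and host so that requests to one host share
// a queue while hostless URIs are spread over a bounded set of numbered queues.
#include <iostream>
#include <string>

static long const kQueueLimit = 8; // stand-in for Acquire::QueueHost::Limit

std::string deriveQueueName(std::string const &access, std::string const &host,
                            long existingForAccess)
{
   if (host.empty())
   {
      // hostless method: take the next free numbered slot, wrapping at the limit
      long const slot = existingForAccess < kQueueLimit
                           ? existingForAccess
                           : existingForAccess % kQueueLimit;
      return access + ':' + std::to_string(slot);
   }
   // host-based queueing: one queue per access method and host
   return access + ':' + host;
}

int main()
{
   std::cout << deriveQueueName("http", "deb.debian.org", 0) << '\n'; // http:deb.debian.org
   std::cout << deriveQueueName("cdrom", "", 3) << '\n';              // cdrom:3
}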
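
The TotalNeeded(), FetchNeeded() and PartialPresent() hunks replace double-typed accumulation loops with unsigned long long sums built via std::accumulate and a lambda, which avoids floating-point rounding on byte counts. A minimal sketch of that idiom in isolation; the Item struct here is a simplified stand-in for pkgAcquire::Item, not the real class.

// Sketch of the std::accumulate idiom used for the new byte totals.
#include <iostream>
#include <numeric>
#include <vector>

struct Item { unsigned long long FileSize; bool Local; };

int main()
{
   std::vector<Item> const items{{1000, false}, {2500, true}, {400, false}};

   // total number of bytes tracked by the fetcher (TotalNeeded analogue)
   unsigned long long const total = std::accumulate(items.begin(), items.end(), 0llu,
      [](unsigned long long t, Item const &i) { return t + i.FileSize; });

   // bytes that still have to come from remote sources (FetchNeeded analogue)
   unsigned long long const fetch = std::accumulate(items.begin(), items.end(), 0llu,
      [](unsigned long long t, Item const &i) { return i.Local ? t : t + i.FileSize; });

   std::cout << total << ' ' << fetch << '\n'; // prints "3900 1400"
}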