X-Git-Url: https://git.saurik.com/apt.git/blobdiff_plain/6c55f07a5fa3612a5d59c61a17da5fe640eadc8b..1bae10217617c2f79969635d4387fb2a5fe19ecb:/apt-pkg/acquire.cc

diff --git a/apt-pkg/acquire.cc b/apt-pkg/acquire.cc
index 5fd378096..7a44d8599 100644
--- a/apt-pkg/acquire.cc
+++ b/apt-pkg/acquire.cc
@@ -24,16 +24,19 @@
 #include
 #include
+#include
 #include
 #include
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -41,7 +44,6 @@
 #include
 #include
 #include
-#include
 #include
 
 									/*}}}*/
@@ -75,7 +77,7 @@ void pkgAcquire::Initialize()
    // chown the auth.conf file as it will be accessed by our methods
    std::string const SandboxUser = _config->Find("APT::Sandbox::User");
-   if (getuid() == 0 && SandboxUser.empty() == false) // if we aren't root, we can't chown, so don't try it
+   if (getuid() == 0 && SandboxUser.empty() == false && SandboxUser != "root") // if we aren't root, we can't chown, so don't try it
    {
       struct passwd const * const pw = getpwnam(SandboxUser.c_str());
       struct group const * const gr = getgrnam("root");
@@ -101,7 +103,7 @@ static bool SetupAPTPartialDirectory(std::string const &grand, std::string const
       return false;
 
    std::string const SandboxUser = _config->Find("APT::Sandbox::User");
-   if (getuid() == 0 && SandboxUser.empty() == false) // if we aren't root, we can't chown, so don't try it
+   if (getuid() == 0 && SandboxUser.empty() == false && SandboxUser != "root") // if we aren't root, we can't chown, so don't try it
    {
       struct passwd const * const pw = getpwnam(SandboxUser.c_str());
       struct group const * const gr = getgrnam("root");
@@ -349,8 +351,42 @@ string pkgAcquire::QueueName(string Uri,MethodConfig const *&Config)
    if (Config->SingleInstance == true || QueueMode == QueueAccess)
       return U.Access;
 
-   string AccessSchema = U.Access + ':',
-	  FullQueueName = AccessSchema + U.Host;
+   string AccessSchema = U.Access + ':';
+   string FullQueueName;
+
+   if (U.Host.empty())
+   {
+      long existing = 0;
+      // check how many queues exist already and reuse empty ones
+      for (Queue const *I = Queues; I != 0; I = I->Next)
+	 if (I->Name.compare(0, AccessSchema.length(), AccessSchema) == 0)
+	 {
+	    if (I->Items == nullptr)
+	       return I->Name;
+	    ++existing;
+	 }
+
+#ifdef _SC_NPROCESSORS_ONLN
+      long cpuCount = sysconf(_SC_NPROCESSORS_ONLN) * 2;
+#else
+      long cpuCount = 10;
+#endif
+      cpuCount = _config->FindI("Acquire::QueueHost::Limit", cpuCount);
+
+      if (cpuCount <= 0 || existing < cpuCount)
+	 strprintf(FullQueueName, "%s%ld", AccessSchema.c_str(), existing);
+      else
+      {
+	 long const randomQueue = random() % cpuCount;
+	 strprintf(FullQueueName, "%s%ld", AccessSchema.c_str(), randomQueue);
+      }
+
+      if (Debug)
+	 clog << "Chose random queue " << FullQueueName << " for " << Uri << endl;
+   } else
+   {
+      FullQueueName = AccessSchema + U.Host;
+   }
    unsigned int Instances = 0, SchemaLength = AccessSchema.length();
 
    Queue *I = Queues;
@@ -426,20 +462,30 @@ void pkgAcquire::SetFds(int &Fd,fd_set *RSet,fd_set *WSet)
       }
    }
 }
 									/*}}}*/
-// Acquire::RunFds - Deal with active FDs				/*{{{*/
+// Acquire::RunFds - compatibility remove on next abi/api break		/*{{{*/
+void pkgAcquire::RunFds(fd_set *RSet,fd_set *WSet)
+{
+   RunFdsSane(RSet, WSet);
+};
+									/*}}}*/
+// Acquire::RunFdsSane - Deal with active FDs				/*{{{*/
 // ---------------------------------------------------------------------
 /* Dispatch active FDs over to the proper workers. It is very important
    that a worker never be erased while this is running! The queue class
    should never erase a worker except during shutdown processing.
    */
-void pkgAcquire::RunFds(fd_set *RSet,fd_set *WSet)
+bool pkgAcquire::RunFdsSane(fd_set *RSet,fd_set *WSet)
 {
+   bool Res = true;
+
    for (Worker *I = Workers; I != 0; I = I->NextAcquire)
    {
       if (I->InFd >= 0 && FD_ISSET(I->InFd,RSet) != 0)
-	 I->InFdReady();
+	 Res &= I->InFdReady();
       if (I->OutFd >= 0 && FD_ISSET(I->OutFd,WSet) != 0)
-	 I->OutFdReady();
+	 Res &= I->OutFdReady();
    }
+
+   return Res;
 }
 									/*}}}*/
 // Acquire::Run - Run the fetch sequence				/*{{{*/
@@ -447,8 +493,129 @@ void pkgAcquire::RunFds(fd_set *RSet,fd_set *WSet)
 /* This runs the queues. It manages a select loop for all of the
    Worker tasks. The workers interact with the queues and items to
    manage the actual fetch. */
+static bool IsAccessibleBySandboxUser(std::string const &filename, bool const ReadWrite)
+{
+   // you would think this is easy to answer with faccessat, right? Wrong!
+   // It e.g. gets groups wrong, so the only thing which works reliably is trying
+   // to open the file we want to open later on…
+   if (unlikely(filename.empty()))
+      return true;
+
+   if (ReadWrite == false)
+   {
+      errno = 0;
+      // can we read a file? Note that non-existing files are "fine"
+      int const fd = open(filename.c_str(), O_RDONLY | O_CLOEXEC);
+      if (fd == -1 && errno == EACCES)
+	 return false;
+      close(fd);
+      return true;
+   }
+   else
+   {
+      // the file might not exist yet and even if it does we will fix permissions,
+      // so all that matters here is that the directory it is in allows that
+      std::string const dirname = flNotFile(filename);
+      if (unlikely(dirname.empty()))
+	 return true;
+
+      char const * const filetag = ".apt-acquire-privs-test.XXXXXX";
+      std::string const tmpfile_tpl = flCombine(dirname, filetag);
+      std::unique_ptr<char, decltype(std::free) *> tmpfile { strdup(tmpfile_tpl.c_str()), std::free };
+      int const fd = mkstemp(tmpfile.get());
+      if (fd == -1 && errno == EACCES)
+	 return false;
+      RemoveFile("IsAccessibleBySandboxUser", tmpfile.get());
+      close(fd);
+      return true;
+   }
+}
+static void CheckDropPrivsMustBeDisabled(pkgAcquire const &Fetcher)
+{
+   if(getuid() != 0)
+      return;
+
+   std::string const SandboxUser = _config->Find("APT::Sandbox::User");
+   if (SandboxUser.empty() || SandboxUser == "root")
+      return;
+
+   struct passwd const * const pw = getpwnam(SandboxUser.c_str());
+   if (pw == NULL)
+   {
+      _error->Warning(_("No sandbox user '%s' on the system, can not drop privileges"), SandboxUser.c_str());
+      _config->Set("APT::Sandbox::User", "");
+      return;
+   }
+
+   gid_t const old_euid = geteuid();
+   gid_t const old_egid = getegid();
+
+   long const ngroups_max = sysconf(_SC_NGROUPS_MAX);
+   std::unique_ptr<gid_t[]> old_gidlist(new gid_t[ngroups_max]);
+   if (unlikely(old_gidlist == NULL))
+      return;
+   ssize_t old_gidlist_nr;
+   if ((old_gidlist_nr = getgroups(ngroups_max, old_gidlist.get())) < 0)
+   {
+      _error->FatalE("getgroups", "getgroups %lu failed", ngroups_max);
+      old_gidlist[0] = 0;
+      old_gidlist_nr = 1;
+   }
+   if (setgroups(1, &pw->pw_gid))
+      _error->FatalE("setgroups", "setgroups %u failed", pw->pw_gid);
+
+   if (setegid(pw->pw_gid) != 0)
+      _error->FatalE("setegid", "setegid %u failed", pw->pw_gid);
+   if (seteuid(pw->pw_uid) != 0)
+      _error->FatalE("seteuid", "seteuid %u failed", pw->pw_uid);
+
+   for (pkgAcquire::ItemCIterator I = Fetcher.ItemsBegin();
+	I != Fetcher.ItemsEnd(); ++I)
+   {
+      // no need to drop privileges for a complete file
+      if ((*I)->Complete == true || (*I)->Status != pkgAcquire::Item::StatIdle)
+	 continue;
+
+      // if destination file is inaccessible all hope is lost for privilege dropping
+      if (IsAccessibleBySandboxUser((*I)->DestFile, true) == false)
+      {
+	 _error->WarningE("pkgAcquire::Run", _("Can't drop privileges for downloading as file '%s' couldn't be accessed by user '%s'."),
+	       (*I)->DestFile.c_str(), SandboxUser.c_str());
+	 _config->Set("APT::Sandbox::User", "");
+	 break;
+      }
+
+      // if it's the source file (e.g. local sources) we might be lucky
+      // and disabling the privilege drop for just some methods is enough.
+      URI const source = (*I)->DescURI();
+      if (source.Access == "file" || source.Access == "copy")
+      {
+	 std::string const conf = "Binary::" + source.Access + "::APT::Sandbox::User";
+	 if (_config->Exists(conf) == true)
+	    continue;
+
+	 if (IsAccessibleBySandboxUser(source.Path, false) == false)
+	 {
+	    _error->NoticeE("pkgAcquire::Run", _("Can't drop privileges for downloading as file '%s' couldn't be accessed by user '%s'."),
+		  source.Path.c_str(), SandboxUser.c_str());
+	    _config->CndSet("Binary::file::APT::Sandbox::User", "root");
+	    _config->CndSet("Binary::copy::APT::Sandbox::User", "root");
+	 }
+      }
+   }
+
+   if (seteuid(old_euid) != 0)
+      _error->FatalE("seteuid", "seteuid %u failed", old_euid);
+   if (setegid(old_egid) != 0)
+      _error->FatalE("setegid", "setegid %u failed", old_egid);
+   if (setgroups(old_gidlist_nr, old_gidlist.get()))
+      _error->FatalE("setgroups", "setgroups %u failed", 0);
+}
 pkgAcquire::RunResult pkgAcquire::Run(int PulseIntervall)
 {
+   _error->PushToStack();
+   CheckDropPrivsMustBeDisabled(*this);
+
    Running = true;
 
    for (Queue *I = Queues; I != 0; I = I->Next)
@@ -484,11 +651,10 @@ pkgAcquire::RunResult pkgAcquire::Run(int PulseIntervall)
 	    _error->Errno("select","Select has failed");
 	    break;
 	 }
-
-	 RunFds(&RFds,&WFds);
-	 if (_error->PendingError() == true)
-	    break;
-
+
+	 if(RunFdsSane(&RFds,&WFds) == false)
+	    break;
+
 	 // Timeout, notify the log class
 	 if (Res == 0 || (Log != 0 && Log->Update == true))
 	 {
@@ -513,9 +679,11 @@ pkgAcquire::RunResult pkgAcquire::Run(int PulseIntervall)
 
    // Shut down the items
    for (ItemIterator I = Items.begin(); I != Items.end(); ++I)
-      (*I)->Finished();
-
-   if (_error->PendingError())
+      (*I)->Finished();
+
+   bool const newError = _error->PendingError();
+   _error->MergeWithStack();
+   if (newError)
       return Failed;
    if (WasCancelled)
       return Cancelled;
@@ -569,6 +737,7 @@ bool pkgAcquire::Clean(string Dir)
       // Skip some files..
       if (strcmp(Dir->d_name,"lock") == 0 ||
	   strcmp(Dir->d_name,"partial") == 0 ||
+	   strcmp(Dir->d_name,"lost+found") == 0 ||
	   strcmp(Dir->d_name,".") == 0 ||
	   strcmp(Dir->d_name,"..") == 0)
	  continue;
@@ -581,7 +750,7 @@ bool pkgAcquire::Clean(string Dir)
 
       // Nothing found, nuke it
       if (I == Items.end())
-	 unlink(Dir->d_name);
+	 RemoveFile("Clean", Dir->d_name);
    };
 
    closedir(D);
@@ -595,10 +764,10 @@
 /* This is the total number of bytes needed */
 APT_PURE unsigned long long pkgAcquire::TotalNeeded()
 {
-   unsigned long long Total = 0;
-   for (ItemCIterator I = ItemsBegin(); I != ItemsEnd(); ++I)
-      Total += (*I)->FileSize;
-   return Total;
+   return std::accumulate(ItemsBegin(), ItemsEnd(), 0llu,
+      [](unsigned long long const T, Item const * const I) {
+	 return T + I->FileSize;
+      });
 }
 									/*}}}*/
 // Acquire::FetchNeeded - Number of bytes needed to get			/*{{{*/
@@ -606,11 +775,13 @@ APT_PURE unsigned long long pkgAcquire::TotalNeeded()
 /* This is the number of bytes that is not local */
 APT_PURE unsigned long long pkgAcquire::FetchNeeded()
 {
-   unsigned long long Total = 0;
-   for (ItemCIterator I = ItemsBegin(); I != ItemsEnd(); ++I)
-      if ((*I)->Local == false)
-	 Total += (*I)->FileSize;
-   return Total;
+   return std::accumulate(ItemsBegin(), ItemsEnd(), 0llu,
+      [](unsigned long long const T, Item const * const I) {
+	 if (I->Local == false)
+	    return T + I->FileSize;
+	 else
+	    return T;
+      });
 }
 									/*}}}*/
 // Acquire::PartialPresent - Number of partial bytes we already have	/*{{{*/
@@ -618,11 +789,13 @@ APT_PURE unsigned long long pkgAcquire::FetchNeeded()
 /* This is the number of bytes of the partial files we already have */
 APT_PURE unsigned long long pkgAcquire::PartialPresent()
 {
-   unsigned long long Total = 0;
-   for (ItemCIterator I = ItemsBegin(); I != ItemsEnd(); ++I)
-      if ((*I)->Local == false)
-	 Total += (*I)->PartialSize;
-   return Total;
+   return std::accumulate(ItemsBegin(), ItemsEnd(), 0llu,
+      [](unsigned long long const T, Item const * const I) {
+	 if (I->Local == false)
+	    return T + I->PartialSize;
+	 else
+	    return T;
+      });
 }
 									/*}}}*/
 // Acquire::UriBegin - Start iterator for the uri list			/*{{{*/
@@ -680,9 +853,8 @@ bool pkgAcquire::Queue::Enqueue(ItemDesc &Item)
 {
    QItem **I = &Items;
    // move to the end of the queue and check for duplicates here
-   HashStringList const hsl = Item.Owner->GetExpectedHashes();
    for (; *I != 0; I = &(*I)->Next)
-      if (Item.URI == (*I)->URI || hsl == (*I)->Owner->GetExpectedHashes())
+      if (Item.URI == (*I)->URI)
      {
	 if (_config->FindB("Debug::pkgAcquire::Worker",false) == true)
	    std::cerr << " @ Queue: Action combined for " << Item.URI << " and " << (*I)->URI << std::endl;
@@ -846,8 +1018,8 @@ bool pkgAcquire::Queue::Cycle()
      return true;
 
    I->Worker = Workers;
-   for (QItem::owner_iterator O = I->Owners.begin(); O != I->Owners.end(); ++O)
-      (*O)->Status = pkgAcquire::Item::StatFetching;
+   for (auto const &O: I->Owners)
+      O->Status = pkgAcquire::Item::StatFetching;
    PipeDepth++;
    if (Workers->QueueItem(I) == false)
      return false;
@@ -899,11 +1071,11 @@ HashStringList pkgAcquire::Queue::QItem::GetExpectedHashes() const	/*{{{*/
 APT_PURE unsigned long long pkgAcquire::Queue::QItem::GetMaximumSize() const	/*{{{*/
 {
    unsigned long long Maximum = std::numeric_limits<unsigned long long>::max();
-   for (pkgAcquire::Queue::QItem::owner_iterator O = Owners.begin(); O != Owners.end(); ++O)
+   for (auto const &O: Owners)
    {
-      if ((*O)->FileSize == 0)
+      if (O->FileSize == 0)
	 continue;
-      Maximum = std::min(Maximum, (*O)->FileSize);
+      Maximum = std::min(Maximum, O->FileSize);
    }
    if (Maximum == std::numeric_limits<unsigned long long>::max())
      return 0;
@@ -925,15 +1097,15 @@ void pkgAcquire::Queue::QItem::SyncDestinationFiles() const	/*{{{*/
      if (lstat((*O)->DestFile.c_str(),&file) == 0)
      {
	 if ((file.st_mode & S_IFREG) == 0)
-	    unlink((*O)->DestFile.c_str());
+	    RemoveFile("SyncDestinationFiles", (*O)->DestFile);
	 else if (supersize < file.st_size)
	 {
	    supersize = file.st_size;
-	    unlink(superfile.c_str());
+	    RemoveFile("SyncDestinationFiles", superfile);
	    rename((*O)->DestFile.c_str(), superfile.c_str());
	 }
	 else
-	    unlink((*O)->DestFile.c_str());
+	    RemoveFile("SyncDestinationFiles", (*O)->DestFile);
	 if (symlink(superfile.c_str(), (*O)->DestFile.c_str()) != 0)
	 {
	    ; // not a problem per-se and no real alternative
@@ -971,11 +1143,11 @@ bool pkgAcquireStatus::Pulse(pkgAcquire *Owner)
    CurrentBytes = 0;
    TotalItems = 0;
    CurrentItems = 0;
-
+
    // Compute the total number of bytes to fetch
    unsigned int Unknown = 0;
    unsigned int Count = 0;
-   bool UnfetchedReleaseFiles = false;
+   bool ExpectAdditionalItems = false;
    for (pkgAcquire::ItemCIterator I = Owner->ItemsBegin();
	I != Owner->ItemsEnd();
	++I, ++Count)
@@ -983,17 +1155,10 @@
    {
      TotalItems++;
      if ((*I)->Status == pkgAcquire::Item::StatDone)
	 ++CurrentItems;
-
-      // Totally ignore local items
-      if ((*I)->Local == true)
-	 continue;
-
-      // see if the method tells us to expect more
-      TotalItems += (*I)->ExpectedAdditionalItems;
-      // check if there are unfetched Release files
-      if ((*I)->Complete == false && (*I)->ExpectedAdditionalItems > 0)
-	 UnfetchedReleaseFiles = true;
+      // do we expect to acquire more files than we know of yet?
+      if ((*I)->ExpectedAdditionalItems > 0)
+	 ExpectAdditionalItems = true;
 
      TotalBytes += (*I)->FileSize;
      if ((*I)->Complete == true)
@@ -1001,7 +1166,7 @@
      if ((*I)->FileSize == 0 && (*I)->Complete == false)
	 ++Unknown;
    }
-
+
    // Compute the current completion
    unsigned long long ResumeSize = 0;
    for (pkgAcquire::Worker *I = Owner->WorkersBegin(); I != 0;
@@ -1029,12 +1194,6 @@
    if (CurrentBytes > TotalBytes)
      CurrentBytes = TotalBytes;
 
-   // debug
-   if (_config->FindB("Debug::acquire::progress", false) == true)
-      std::clog << " Bytes: "
-	 << SizeToStr(CurrentBytes) << " / " << SizeToStr(TotalBytes)
-	 << std::endl;
-
    // Compute the CPS
    struct timeval NewTime;
    gettimeofday(&NewTime,0);
@@ -1056,12 +1215,27 @@
 
    double const OldPercent = Percent;
    // calculate the percentage, if we have too little data assume 1%
-   if (TotalBytes > 0 && UnfetchedReleaseFiles)
+   if (ExpectAdditionalItems)
      Percent = 0;
    else
      // use both files and bytes because bytes can be unreliable
      Percent = (0.8 * (CurrentBytes/float(TotalBytes)*100.0) +
		0.2 * (CurrentItems/float(TotalItems)*100.0));
+
+   // debug
+   if (_config->FindB("Debug::acquire::progress", false) == true)
+   {
+      std::clog
+	 << "["
+	 << std::setw(5) << std::setprecision(4) << std::showpoint << Percent
+	 << "]"
+	 << " Bytes: "
+	 << SizeToStr(CurrentBytes) << " / " << SizeToStr(TotalBytes)
+	 << " # Files: "
+	 << CurrentItems << " / " << TotalItems
+	 << std::endl;
+   }
+
    double const DiffPercent = Percent - OldPercent;
    if (DiffPercent < 0.001 && _config->FindB("Acquire::Progress::Diffpercent", false) == true)
      return true;
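
The QueueName() hunk above spreads hostless URIs (e.g. cdrom or copy sources) over several numbered queues instead of a single per-access queue: up to twice the number of online CPUs, overridable via Acquire::QueueHost::Limit, and once every slot is taken a random existing slot is reused. The following is a minimal standalone sketch of that selection rule, not apt's implementation: the queue list is modelled as a plain vector of names, the reuse of currently empty queues is omitted, and ChooseQueueName() is an invented helper name.

// Standalone sketch of the hostless queue fan-out, not pkgAcquire::QueueName().
#include <algorithm>
#include <cstdio>
#include <cstdlib>
#include <string>
#include <vector>
#include <unistd.h>

// Hostless methods get numbered queues ("<access>:0", "<access>:1", ...) up to a
// limit; once the limit is reached a random existing slot is reused.
static std::string ChooseQueueName(std::string const &access, std::string const &host,
				   std::vector<std::string> const &queues,
				   long limitOverride /* <= 0 means "2 * CPUs" */)
{
   std::string const schema = access + ':';
   if (host.empty() == false)
      return schema + host;			// host-based queue, as before

   long existing = 0;				// queues already created for this method
   for (auto const &name : queues)
      if (name.compare(0, schema.length(), schema) == 0)
	 ++existing;

#ifdef _SC_NPROCESSORS_ONLN
   long limit = sysconf(_SC_NPROCESSORS_ONLN) * 2;
#else
   long limit = 10;
#endif
   if (limitOverride > 0)
      limit = limitOverride;			// stands in for Acquire::QueueHost::Limit

   long slot = existing;
   if (limit > 0 && existing >= limit)
      slot = random() % limit;			// all slots taken: reuse a random one
   return schema + std::to_string(slot);
}

int main()
{
   std::vector<std::string> queues;
   for (int i = 0; i < 6; ++i)
   {
      std::string const name = ChooseQueueName("cdrom", "", queues, 4);
      std::printf("item %d -> %s\n", i, name.c_str());
      if (std::find(queues.begin(), queues.end(), name) == queues.end())
	 queues.push_back(name);
   }
}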
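
IsAccessibleBySandboxUser() answers "could the sandbox user use this path?" by probing rather than by trusting faccessat(): a read check simply opens the file, a write check creates and removes a temporary file in the target directory. Here is a minimal sketch of those two probes outside of apt; CanRead() and CanWriteInDirOf() are invented helper names and the default path in main() is only an example.

// Standalone sketch of the read/write accessibility probes.
#include <cerrno>
#include <cstdio>
#include <stdlib.h>	// mkstemp
#include <string>
#include <fcntl.h>
#include <unistd.h>

// Read probe: just try to open the file. A missing file counts as "fine";
// only a permission error means dropping privileges would break the download.
static bool CanRead(std::string const &file)
{
   int const fd = open(file.c_str(), O_RDONLY | O_CLOEXEC);
   if (fd == -1)
      return errno != EACCES;
   close(fd);
   return true;
}

// Write probe: the file itself may not exist yet, so create and remove a
// temporary file in the directory it would live in.
static bool CanWriteInDirOf(std::string const &file)
{
   std::string::size_type const slash = file.rfind('/');
   std::string const dir = (slash == std::string::npos) ? std::string(".") : file.substr(0, slash);
   std::string tmpl = dir + "/.acquire-privs-probe.XXXXXX";
   int const fd = mkstemp(&tmpl[0]);	// rewrites the XXXXXX part in place
   if (fd == -1)
      return errno != EACCES;
   close(fd);
   unlink(tmpl.c_str());
   return true;
}

int main(int argc, char **argv)
{
   // the default path is only an example; pass any file as the first argument
   std::string const path = (argc > 1) ? argv[1] : "/tmp/acquire-probe-demo";
   std::printf("%s: readable=%d writable-dir=%d\n",
	       path.c_str(), CanRead(path), CanWriteInDirOf(path));
}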
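
CheckDropPrivsMustBeDisabled() runs those probes while temporarily wearing the sandbox user's identity and then switches back. Below is a minimal sketch of that seteuid/setegid dance, assuming a glibc/Linux system and run as root; the user name "_apt" is only an example, and the saving and restoring of the supplementary group list that the real code does via getgroups()/setgroups() is omitted for brevity.

// Standalone sketch of temporarily switching the effective identity and back.
#include <cstdio>
#include <grp.h>
#include <pwd.h>
#include <unistd.h>

int main()
{
   if (getuid() != 0)
   {
      std::fprintf(stderr, "this demo needs to run as root\n");
      return 1;
   }

   // "_apt" is just an example; any unprivileged account works
   struct passwd const * const pw = getpwnam("_apt");
   if (pw == nullptr)
   {
      std::fprintf(stderr, "sandbox user not found\n");
      return 1;
   }

   uid_t const old_euid = geteuid();
   gid_t const old_egid = getegid();

   // order matters: groups and gid first, uid last - once the effective uid
   // is no longer root we would not be allowed to change the groups anymore
   if (setgroups(1, &pw->pw_gid) != 0 || setegid(pw->pw_gid) != 0 || seteuid(pw->pw_uid) != 0)
   {
      std::fprintf(stderr, "dropping privileges failed\n");
      return 1;
   }

   std::printf("probing as uid %u gid %u ...\n",
	       static_cast<unsigned>(geteuid()), static_cast<unsigned>(getegid()));
   // ... file access probes would run here ...

   // switch back in the reverse order: uid first, then gid
   if (seteuid(old_euid) != 0 || setegid(old_egid) != 0)
   {
      std::fprintf(stderr, "restoring privileges failed\n");
      return 1;
   }
   std::printf("back to uid %u\n", static_cast<unsigned>(geteuid()));
   return 0;
}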
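
The TotalNeeded()/FetchNeeded()/PartialPresent() hunks replace hand-written summation loops with std::accumulate plus a lambda. A minimal sketch of the same pattern over a stand-in item type ("Download" is not apt's type):

// Standalone sketch of the std::accumulate pattern used above.
#include <cstdio>
#include <numeric>
#include <vector>

struct Download
{
   unsigned long long FileSize;
   bool Local;
};

// total bytes of all items
static unsigned long long TotalNeeded(std::vector<Download> const &items)
{
   return std::accumulate(items.begin(), items.end(), 0llu,
      [](unsigned long long const sum, Download const &d) { return sum + d.FileSize; });
}

// only the bytes that still have to come over the wire
static unsigned long long FetchNeeded(std::vector<Download> const &items)
{
   return std::accumulate(items.begin(), items.end(), 0llu,
      [](unsigned long long const sum, Download const &d) {
	 return d.Local ? sum : sum + d.FileSize;
      });
}

int main()
{
   std::vector<Download> const items = { {1000, false}, {2500, true}, {4096, false} };
   std::printf("total: %llu, to fetch: %llu\n", TotalNeeded(items), FetchNeeded(items));
}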