#include <apt-pkg/fileutl.h>
#include <algorithm>
+#include <numeric>
#include <string>
#include <vector>
#include <iostream>
#include <sstream>
#include <iomanip>
+#include <memory>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
+#include <fcntl.h>
#include <pwd.h>
#include <grp.h>
#include <dirent.h>
#include <sys/select.h>
#include <errno.h>
#include <sys/stat.h>
-#include <sys/types.h>
#include <apti18n.h>
/*}}}*/
// chown the auth.conf file as it will be accessed by our methods
std::string const SandboxUser = _config->Find("APT::Sandbox::User");
- if (getuid() == 0 && SandboxUser.empty() == false) // if we aren't root, we can't chown, so don't try it
+ if (getuid() == 0 && SandboxUser.empty() == false && SandboxUser != "root") // if we aren't root, we can't chown, so don't try it
{
struct passwd const * const pw = getpwnam(SandboxUser.c_str());
struct group const * const gr = getgrnam("root");
return false;
std::string const SandboxUser = _config->Find("APT::Sandbox::User");
- if (getuid() == 0 && SandboxUser.empty() == false) // if we aren't root, we can't chown, so don't try it
+ if (getuid() == 0 && SandboxUser.empty() == false && SandboxUser != "root") // if we aren't root, we can't chown, so don't try it
{
struct passwd const * const pw = getpwnam(SandboxUser.c_str());
struct group const * const gr = getgrnam("root");
it is constructed which creates a queue (based on the current queue
mode) and puts the item in that queue. If the system is running then
the queue might be started. */
+static bool DoesAcquireResultInInstantFailure(pkgAcquire::Item * const Item,
+ pkgAcquire::MethodConfig const * const Config, pkgAcquireStatus * const Log)
+{
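+   // Check whether this item is doomed before any method even sees it (a redirection
+   // loop, or required hashes that are unusable) and if so fail it right away with a
+   // synthesized "400 URI Failure" message.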
+ auto SavedDesc = Item->GetItemDesc();
+ if (Item->IsRedirectionLoop(SavedDesc.URI))
+ {
+ std::string const Message = "400 URI Failure"
+ "\nURI: " + SavedDesc.URI +
+ "\nFilename: " + Item->DestFile +
+ "\nFailReason: RedirectionLoop";
+
+ Item->Status = pkgAcquire::Item::StatError;
+ Item->Failed(Message, Config);
+ if (Log != nullptr)
+ Log->Fail(SavedDesc);
+ return true;
+ }
+
+ HashStringList const hsl = Item->GetExpectedHashes();
+ if (hsl.usable() == false && Item->HashesRequired() &&
+ _config->Exists("Acquire::ForceHash") == false)
+ {
+ std::string const Message = "400 URI Failure"
+ "\nURI: " + SavedDesc.URI +
+ "\nFilename: " + Item->DestFile +
+ "\nFailReason: WeakHashSums";
+
+ Item->Status = pkgAcquire::Item::StatAuthError;
+ Item->Failed(Message, Config);
+ if (Log != nullptr)
+ Log->Fail(SavedDesc);
+ return true;
+ }
+ return false;
+}
void pkgAcquire::Enqueue(ItemDesc &Item)
{
// Determine which queue to put the item in
if (Name.empty() == true)
return;
+   /* The check for Running avoids producing errors in the log before we
+      have actually started. Failing here unconditionally would be easier
+      to implement, but would confuse users and implementations, so the
+      items skipped here are checked again in #Startup */
+ if (Running && DoesAcquireResultInInstantFailure(Item.Owner, Config, Log))
+ return;
+
// Find the queue structure
Queue *I = Queues;
for (; I != 0 && I->Name != Name; I = I->Next);
if (Config->SingleInstance == true || QueueMode == QueueAccess)
return U.Access;
- string AccessSchema = U.Access + ':',
- FullQueueName = AccessSchema + U.Host;
+ string AccessSchema = U.Access + ':';
+ string FullQueueName;
+
+ if (U.Host.empty())
+ {
+ long existing = 0;
+ // check how many queues exist already and reuse empty ones
+ for (Queue const *I = Queues; I != 0; I = I->Next)
+ if (I->Name.compare(0, AccessSchema.length(), AccessSchema) == 0)
+ {
+ if (I->Items == nullptr)
+ return I->Name;
+ ++existing;
+ }
+
+#ifdef _SC_NPROCESSORS_ONLN
+ long cpuCount = sysconf(_SC_NPROCESSORS_ONLN) * 2;
+#else
+ long cpuCount = 10;
+#endif
+ cpuCount = _config->FindI("Acquire::QueueHost::Limit", cpuCount);
+
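+      // e.g. Acquire::QueueHost::Limit "8"; in apt.conf caps these host-less queues
+      // at eight, while a value of 0 or below leaves the number of queues uncapped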
+ if (cpuCount <= 0 || existing < cpuCount)
+ strprintf(FullQueueName, "%s%ld", AccessSchema.c_str(), existing);
+ else
+ {
+ long const randomQueue = random() % cpuCount;
+ strprintf(FullQueueName, "%s%ld", AccessSchema.c_str(), randomQueue);
+ }
+
+ if (Debug)
+ clog << "Chose random queue " << FullQueueName << " for " << Uri << endl;
+ } else
+ {
+ FullQueueName = AccessSchema + U.Host;
+ }
unsigned int Instances = 0, SchemaLength = AccessSchema.length();
Queue *I = Queues;
}
}
/*}}}*/
-// Acquire::RunFds - Deal with active FDs /*{{{*/
+// Acquire::RunFds - compatibility shim, remove on next abi/api break /*{{{*/
+void pkgAcquire::RunFds(fd_set *RSet,fd_set *WSet)
+{
+ RunFdsSane(RSet, WSet);
+}
+ /*}}}*/
+// Acquire::RunFdsSane - Deal with active FDs /*{{{*/
// ---------------------------------------------------------------------
/* Dispatch active FDs over to the proper workers. It is very important
that a worker never be erased while this is running! The queue class
should never erase a worker except during shutdown processing. */
-void pkgAcquire::RunFds(fd_set *RSet,fd_set *WSet)
+bool pkgAcquire::RunFdsSane(fd_set *RSet,fd_set *WSet)
{
+ bool Res = true;
+
for (Worker *I = Workers; I != 0; I = I->NextAcquire)
{
if (I->InFd >= 0 && FD_ISSET(I->InFd,RSet) != 0)
- I->InFdReady();
+ Res &= I->InFdReady();
if (I->OutFd >= 0 && FD_ISSET(I->OutFd,WSet) != 0)
- I->OutFdReady();
+ Res &= I->OutFdReady();
}
+
+ return Res;
}
/*}}}*/
// Acquire::Run - Run the fetch sequence /*{{{*/
/* This runs the queues. It manages a select loop for all of the
Worker tasks. The workers interact with the queues and items to
manage the actual fetch. */
+static bool IsAccessibleBySandboxUser(std::string const &filename, bool const ReadWrite)
+{
+   // you would think this is easy to answer with faccessat, right? Wrong!
+   // It e.g. gets groups wrong, so the only thing which works reliably is trying
+   // to open the file we want to open later on…
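+   // (callers pass ReadWrite=true for files we will create/write, e.g. download
+   //  targets, and ReadWrite=false for files we only need to read, e.g. local sources)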
+ if (unlikely(filename.empty()))
+ return true;
+
+ if (ReadWrite == false)
+ {
+ errno = 0;
+ // can we read a file? Note that non-existing files are "fine"
+ int const fd = open(filename.c_str(), O_RDONLY | O_CLOEXEC);
+      if (fd == -1)
+         return errno != EACCES;
+      close(fd);
+ return true;
+ }
+ else
+ {
+      // the file might not exist yet and even if it does we will fix its permissions
+      // later, so all that matters here is that its directory lets us create files
+ std::string const dirname = flNotFile(filename);
+ if (unlikely(dirname.empty()))
+ return true;
+
+ char const * const filetag = ".apt-acquire-privs-test.XXXXXX";
+ std::string const tmpfile_tpl = flCombine(dirname, filetag);
+ std::unique_ptr<char, decltype(std::free) *> tmpfile { strdup(tmpfile_tpl.c_str()), std::free };
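+      // mkstemp() rewrites its template in place, so hand it a writable copy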
+ int const fd = mkstemp(tmpfile.get());
+      if (fd == -1)
+         return errno != EACCES;
+ RemoveFile("IsAccessibleBySandboxUser", tmpfile.get());
+ close(fd);
+ return true;
+ }
+}
+static void CheckDropPrivsMustBeDisabled(pkgAcquire const &Fetcher)
+{
+ if(getuid() != 0)
+ return;
+
+ std::string const SandboxUser = _config->Find("APT::Sandbox::User");
+ if (SandboxUser.empty() || SandboxUser == "root")
+ return;
+
+ struct passwd const * const pw = getpwnam(SandboxUser.c_str());
+ if (pw == NULL)
+ {
+ _error->Warning(_("No sandbox user '%s' on the system, can not drop privileges"), SandboxUser.c_str());
+ _config->Set("APT::Sandbox::User", "");
+ return;
+ }
+
+   uid_t const old_euid = geteuid();
+ gid_t const old_egid = getegid();
+
+ long const ngroups_max = sysconf(_SC_NGROUPS_MAX);
+ std::unique_ptr<gid_t[]> old_gidlist(new gid_t[ngroups_max]);
+ if (unlikely(old_gidlist == NULL))
+ return;
+ ssize_t old_gidlist_nr;
+ if ((old_gidlist_nr = getgroups(ngroups_max, old_gidlist.get())) < 0)
+ {
+      _error->FatalE("getgroups", "getgroups %ld failed", ngroups_max);
+ old_gidlist[0] = 0;
+ old_gidlist_nr = 1;
+ }
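+   // drop all supplementary groups, keeping only the sandbox user's primary group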
+ if (setgroups(1, &pw->pw_gid))
+ _error->FatalE("setgroups", "setgroups %u failed", pw->pw_gid);
+
+ if (setegid(pw->pw_gid) != 0)
+ _error->FatalE("setegid", "setegid %u failed", pw->pw_gid);
+ if (seteuid(pw->pw_uid) != 0)
+ _error->FatalE("seteuid", "seteuid %u failed", pw->pw_uid);
+
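+   // from here on we are (effectively) the sandbox user, so the checks below see
+   // the permissions the download methods will actually run with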
+ for (pkgAcquire::ItemCIterator I = Fetcher.ItemsBegin();
+ I != Fetcher.ItemsEnd(); ++I)
+ {
+ // no need to drop privileges for a complete file
+ if ((*I)->Complete == true || (*I)->Status != pkgAcquire::Item::StatIdle)
+ continue;
+
+ // if destination file is inaccessible all hope is lost for privilege dropping
+ if (IsAccessibleBySandboxUser((*I)->DestFile, true) == false)
+ {
+ _error->WarningE("pkgAcquire::Run", _("Can't drop privileges for downloading as file '%s' couldn't be accessed by user '%s'."),
+ (*I)->DestFile.c_str(), SandboxUser.c_str());
+ _config->Set("APT::Sandbox::User", "");
+ break;
+ }
+
+      // if it's the source file (e.g. local sources) we might be lucky
+      // and only need to disable the privilege drop for some methods.
+ URI const source = (*I)->DescURI();
+ if (source.Access == "file" || source.Access == "copy")
+ {
+ std::string const conf = "Binary::" + source.Access + "::APT::Sandbox::User";
+ if (_config->Exists(conf) == true)
+ continue;
+
+ if (IsAccessibleBySandboxUser(source.Path, false) == false)
+ {
+ _error->NoticeE("pkgAcquire::Run", _("Can't drop privileges for downloading as file '%s' couldn't be accessed by user '%s'."),
+ source.Path.c_str(), SandboxUser.c_str());
+ _config->CndSet("Binary::file::APT::Sandbox::User", "root");
+ _config->CndSet("Binary::copy::APT::Sandbox::User", "root");
+ }
+ }
+ }
+
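+   // switch back to the old identity; euid is restored first so we regain the
+   // privileges needed to restore the groups as well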
+ if (seteuid(old_euid) != 0)
+ _error->FatalE("seteuid", "seteuid %u failed", old_euid);
+ if (setegid(old_egid) != 0)
+ _error->FatalE("setegid", "setegid %u failed", old_egid);
+ if (setgroups(old_gidlist_nr, old_gidlist.get()))
+      _error->FatalE("setgroups", "setgroups %zd failed", old_gidlist_nr);
+}
pkgAcquire::RunResult pkgAcquire::Run(int PulseIntervall)
{
+ _error->PushToStack();
+ CheckDropPrivsMustBeDisabled(*this);
+
Running = true;
for (Queue *I = Queues; I != 0; I = I->Next)
_error->Errno("select","Select has failed");
break;
}
-
- RunFds(&RFds,&WFds);
- if (_error->PendingError() == true)
- break;
-
+
+      if (RunFdsSane(&RFds,&WFds) == false)
+         break;
+
// Timeout, notify the log class
if (Res == 0 || (Log != 0 && Log->Update == true))
{
// Shut down the items
for (ItemIterator I = Items.begin(); I != Items.end(); ++I)
- (*I)->Finished();
-
- if (_error->PendingError())
+ (*I)->Finished();
+
+ bool const newError = _error->PendingError();
+ _error->MergeWithStack();
+ if (newError)
return Failed;
if (WasCancelled)
return Cancelled;
// Skip some files..
if (strcmp(Dir->d_name,"lock") == 0 ||
strcmp(Dir->d_name,"partial") == 0 ||
+ strcmp(Dir->d_name,"lost+found") == 0 ||
strcmp(Dir->d_name,".") == 0 ||
strcmp(Dir->d_name,"..") == 0)
continue;
// Nothing found, nuke it
if (I == Items.end())
- unlink(Dir->d_name);
+ RemoveFile("Clean", Dir->d_name);
};
closedir(D);
/* This is the total number of bytes needed */
APT_PURE unsigned long long pkgAcquire::TotalNeeded()
{
- unsigned long long Total = 0;
- for (ItemCIterator I = ItemsBegin(); I != ItemsEnd(); ++I)
- Total += (*I)->FileSize;
- return Total;
+ return std::accumulate(ItemsBegin(), ItemsEnd(), 0llu,
+ [](unsigned long long const T, Item const * const I) {
+ return T + I->FileSize;
+ });
}
/*}}}*/
// Acquire::FetchNeeded - Number of bytes needed to get /*{{{*/
/* This is the number of bytes that is not local */
APT_PURE unsigned long long pkgAcquire::FetchNeeded()
{
- unsigned long long Total = 0;
- for (ItemCIterator I = ItemsBegin(); I != ItemsEnd(); ++I)
- if ((*I)->Local == false)
- Total += (*I)->FileSize;
- return Total;
+ return std::accumulate(ItemsBegin(), ItemsEnd(), 0llu,
+ [](unsigned long long const T, Item const * const I) {
+ if (I->Local == false)
+ return T + I->FileSize;
+ else
+ return T;
+ });
}
/*}}}*/
// Acquire::PartialPresent - Number of partial bytes we already have /*{{{*/
/* This is the number of bytes from partial downloads we already have for non-local items */
APT_PURE unsigned long long pkgAcquire::PartialPresent()
{
- unsigned long long Total = 0;
- for (ItemCIterator I = ItemsBegin(); I != ItemsEnd(); ++I)
- if ((*I)->Local == false)
- Total += (*I)->PartialSize;
- return Total;
+ return std::accumulate(ItemsBegin(), ItemsEnd(), 0llu,
+ [](unsigned long long const T, Item const * const I) {
+ if (I->Local == false)
+ return T + I->PartialSize;
+ else
+ return T;
+ });
}
/*}}}*/
// Acquire::UriBegin - Start iterator for the uri list /*{{{*/
{
QItem **I = &Items;
// move to the end of the queue and check for duplicates here
- HashStringList const hsl = Item.Owner->GetExpectedHashes();
for (; *I != 0; I = &(*I)->Next)
- if (Item.URI == (*I)->URI || hsl == (*I)->Owner->GetExpectedHashes())
+ if (Item.URI == (*I)->URI)
{
if (_config->FindB("Debug::pkgAcquire::Worker",false) == true)
std::cerr << " @ Queue: Action combined for " << Item.URI << " and " << (*I)->URI << std::endl;
if (Workers == 0)
{
URI U(Name);
- pkgAcquire::MethodConfig *Cnf = Owner->GetConfig(U.Access);
- if (Cnf == 0)
+ pkgAcquire::MethodConfig * const Cnf = Owner->GetConfig(U.Access);
+ if (unlikely(Cnf == nullptr))
return false;
-
+
+ // now-running twin of the pkgAcquire::Enqueue call
+ for (QItem *I = Items; I != 0; )
+ {
+ bool pointless = false;
+ for (auto &&O: I->Owners)
+ if (DoesAcquireResultInInstantFailure(O, Cnf, Owner->Log))
+ pointless = true;
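+      // a failed item is dequeued, which can invalidate I, hence the restart from the head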
+ I = pointless ? Items : I->Next;
+ }
+
Workers = new Worker(this,Cnf,Owner->Log);
Owner->Add(Workers);
if (Workers->Start() == false)
return true;
I->Worker = Workers;
- for (QItem::owner_iterator O = I->Owners.begin(); O != I->Owners.end(); ++O)
- (*O)->Status = pkgAcquire::Item::StatFetching;
+ for (auto const &O: I->Owners)
+ O->Status = pkgAcquire::Item::StatFetching;
PipeDepth++;
if (Workers->QueueItem(I) == false)
return false;
APT_PURE unsigned long long pkgAcquire::Queue::QItem::GetMaximumSize() const /*{{{*/
{
unsigned long long Maximum = std::numeric_limits<unsigned long long>::max();
- for (pkgAcquire::Queue::QItem::owner_iterator O = Owners.begin(); O != Owners.end(); ++O)
+ for (auto const &O: Owners)
{
- if ((*O)->FileSize == 0)
+ if (O->FileSize == 0)
continue;
- Maximum = std::min(Maximum, (*O)->FileSize);
+ Maximum = std::min(Maximum, O->FileSize);
}
if (Maximum == std::numeric_limits<unsigned long long>::max())
return 0;
if (lstat((*O)->DestFile.c_str(),&file) == 0)
{
if ((file.st_mode & S_IFREG) == 0)
- unlink((*O)->DestFile.c_str());
+ RemoveFile("SyncDestinationFiles", (*O)->DestFile);
else if (supersize < file.st_size)
{
supersize = file.st_size;
- unlink(superfile.c_str());
+ RemoveFile("SyncDestinationFiles", superfile);
rename((*O)->DestFile.c_str(), superfile.c_str());
}
else
- unlink((*O)->DestFile.c_str());
+ RemoveFile("SyncDestinationFiles", (*O)->DestFile);
if (symlink(superfile.c_str(), (*O)->DestFile.c_str()) != 0)
{
; // not a problem per-se and no real alternative
CurrentBytes = 0;
TotalItems = 0;
CurrentItems = 0;
-
+
// Compute the total number of bytes to fetch
unsigned int Unknown = 0;
unsigned int Count = 0;
- bool UnfetchedReleaseFiles = false;
+ bool ExpectAdditionalItems = false;
for (pkgAcquire::ItemCIterator I = Owner->ItemsBegin();
I != Owner->ItemsEnd();
++I, ++Count)
TotalItems++;
if ((*I)->Status == pkgAcquire::Item::StatDone)
++CurrentItems;
-
- // Totally ignore local items
- if ((*I)->Local == true)
- continue;
- // see if the method tells us to expect more
- TotalItems += (*I)->ExpectedAdditionalItems;
-
- // check if there are unfetched Release files
- if ((*I)->Complete == false && (*I)->ExpectedAdditionalItems > 0)
- UnfetchedReleaseFiles = true;
+ // do we expect to acquire more files than we know of yet?
+ if ((*I)->ExpectedAdditionalItems > 0)
+ ExpectAdditionalItems = true;
TotalBytes += (*I)->FileSize;
if ((*I)->Complete == true)
if ((*I)->FileSize == 0 && (*I)->Complete == false)
++Unknown;
}
-
+
// Compute the current completion
unsigned long long ResumeSize = 0;
for (pkgAcquire::Worker *I = Owner->WorkersBegin(); I != 0;
if (CurrentBytes > TotalBytes)
CurrentBytes = TotalBytes;
- // debug
- if (_config->FindB("Debug::acquire::progress", false) == true)
- std::clog << " Bytes: "
- << SizeToStr(CurrentBytes) << " / " << SizeToStr(TotalBytes)
- << std::endl;
-
// Compute the CPS
struct timeval NewTime;
gettimeofday(&NewTime,0);
double const OldPercent = Percent;
// calculate the percentage, if we have too little data assume 1%
- if (TotalBytes > 0 && UnfetchedReleaseFiles)
+ if (ExpectAdditionalItems)
Percent = 0;
else
// use both files and bytes because bytes can be unreliable
Percent = (0.8 * (CurrentBytes/float(TotalBytes)*100.0) +
0.2 * (CurrentItems/float(TotalItems)*100.0));
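+   // e.g. (hypothetical numbers) 50 of 100 MB fetched and 5 of 20 items done
+   // gives 0.8*50 + 0.2*25 = 45 percent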
+
+ // debug
+ if (_config->FindB("Debug::acquire::progress", false) == true)
+ {
+ std::clog
+ << "["
+ << std::setw(5) << std::setprecision(4) << std::showpoint << Percent
+ << "]"
+ << " Bytes: "
+ << SizeToStr(CurrentBytes) << " / " << SizeToStr(TotalBytes)
+ << " # Files: "
+ << CurrentItems << " / " << TotalItems
+ << std::endl;
+ }
+
double const DiffPercent = Percent - OldPercent;
if (DiffPercent < 0.001 && _config->FindB("Acquire::Progress::Diffpercent", false) == true)
return true;
snprintf(msg,sizeof(msg), _("Retrieving file %li of %li"), i, TotalItems);
// build the status str
- status << "dlstatus:" << i
- << ":" << std::setprecision(3) << Percent
- << ":" << msg
- << endl;
-
- std::string const dlstatus = status.str();
- FileFd::Write(fd, dlstatus.c_str(), dlstatus.size());
+   std::ostringstream str;
+   // use the classic "C" locale so the decimal separator is always '.'; constructing
+   // a named locale like "C.UTF-8" can throw if it is not available on the system
+   str.imbue(std::locale::classic());
+ str.precision(4);
+ str << "dlstatus" << ':' << std::fixed << i << ':' << Percent << ':' << msg << '\n';
+ auto const dlstatus = str.str();
+ FileFd::Write(fd, dlstatus.data(), dlstatus.size());
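+   // e.g. (hypothetical values) the line written to the status fd looks like:
+   //   dlstatus:3:42.5000:Retrieving file 3 of 10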
}
return true;