if (getuid() == 0 && SandboxUser.empty() == false && SandboxUser != "root") // if we aren't root, we can't chown, so don't try it
{
struct passwd const * const pw = getpwnam(SandboxUser.c_str());
- struct group const * const gr = getgrnam("root");
+ struct group const * const gr = getgrnam(ROOT_GROUP);
if (pw != NULL && gr != NULL)
{
// chown the partial dir
std::string const AuthConf = _config->FindFile("Dir::Etc::netrc");
if (getuid() == 0 && SandboxUser.empty() == false && SandboxUser != "root") // if we aren't root, we can't chown, so don't try it
{
struct passwd const * const pw = getpwnam(SandboxUser.c_str());
- struct group const * const gr = getgrnam("root");
+ struct group const * const gr = getgrnam(ROOT_GROUP);
if (pw != NULL && gr != NULL)
{
it is constructed which creates a queue (based on the current queue
mode) and puts the item in that queue. If the system is running then
the queue might be started. */
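+// Fail an item right away if it is obviously doomed: either the request is
+// caught in a redirection loop, or the item requires hashes for verification
+// but has none we consider usable (and Acquire::ForceHash is not set).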
+static bool CheckForBadItemAndFailIt(pkgAcquire::Item * const Item,
+ pkgAcquire::MethodConfig const * const Config, pkgAcquireStatus * const Log)
+{
+ auto SavedDesc = Item->GetItemDesc();
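+ // a request which redirects back to a URI it has already visited can
+ // never finish, so fail it instead of following the loop forever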
+ if (Item->IsRedirectionLoop(SavedDesc.URI))
+ {
+ std::string const Message = "400 URI Failure"
+ "\nURI: " + SavedDesc.URI +
+ "\nFilename: " + Item->DestFile +
+ "\nFailReason: RedirectionLoop";
+
+ Item->Status = pkgAcquire::Item::StatError;
+ Item->Failed(Message, Config);
+ if (Log != nullptr)
+ Log->Fail(SavedDesc);
+ return true;
+ }
+
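+ // an item which requires hashes for verification, but has none we consider
+ // strong enough, can never be verified, so fail it before wasting a download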
+ HashStringList const hsl = Item->GetExpectedHashes();
+ if (hsl.usable() == false && Item->HashesRequired() &&
+ _config->Exists("Acquire::ForceHash") == false)
+ {
+ std::string const Message = "400 URI Failure"
+ "\nURI: " + SavedDesc.URI +
+ "\nFilename: " + Item->DestFile +
+ "\nFailReason: WeakHashSums";
+
+ Item->Status = pkgAcquire::Item::StatAuthError;
+ Item->Failed(Message, Config);
+ if (Log != nullptr)
+ Log->Fail(SavedDesc);
+ return true;
+ }
+ return false;
+}
void pkgAcquire::Enqueue(ItemDesc &Item)
{
// Determine which queue to put the item in
if (Name.empty() == true)
return;
+ /* The check for Running avoids producing errors in the log before we
+ have actually started, which would be easier to implement here, but
+ would confuse users/implementations, so items skipped here are checked
+ again in #Startup. */
+ if (Running && CheckForBadItemAndFailIt(Item.Owner, Config, Log))
+ return;
+
// Find the queue structure
Queue *I = Queues;
for (; I != 0 && I->Name != Name; I = I->Next);
return U.Access;
string AccessSchema = U.Access + ':';
- string FullQueueName;
+ string FullQueueName;
if (U.Host.empty())
{
- long randomQueue = random();
+ long existing = 0;
+ // check how many queues exist already and reuse empty ones
+ for (Queue const *I = Queues; I != 0; I = I->Next)
+ if (I->Name.compare(0, AccessSchema.length(), AccessSchema) == 0)
+ {
+ if (I->Items == nullptr)
+ return I->Name;
+ ++existing;
+ }
+
#ifdef _SC_NPROCESSORS_ONLN
long cpuCount = sysconf(_SC_NPROCESSORS_ONLN) * 2;
#else
- long cpuCount = _config->FindI("Acquire::QueueHost::Limit",10);
+ long cpuCount = 10;
#endif
- if (cpuCount > 0)
- randomQueue %= cpuCount;
+ cpuCount = _config->FindI("Acquire::QueueHost::Limit", cpuCount);
- strprintf(FullQueueName, "%s%ld", AccessSchema.c_str(), randomQueue);
- if (Debug) {
- clog << "Chose random queue " << FullQueueName << " for " << Uri << endl;
+ if (cpuCount <= 0 || existing < cpuCount)
+ strprintf(FullQueueName, "%s%ld", AccessSchema.c_str(), existing);
+ else
+ {
+ long const randomQueue = random() % cpuCount;
+ strprintf(FullQueueName, "%s%ld", AccessSchema.c_str(), randomQueue);
}
+
+ if (Debug)
+ clog << "Chose random queue " << FullQueueName << " for " << Uri << endl;
} else
{
FullQueueName = AccessSchema + U.Host;
+
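+ // optionally cap the number of queues (and thus workers) per method:
+ // each host is assigned one of at most MaxParallel queues in round-robin
+ // fashion, so requests for the same host always end up in the same queue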
+ int parallel(_config->FindI("Acquire::"+U.Access+"::MaxParallel",8));
+ if (parallel > 0) {
+ typedef map<string, int> indexmap;
+ static indexmap indices;
+
+ pair<indexmap::iterator, bool> cache(indices.insert(indexmap::value_type(FullQueueName, -1)));
+ if (cache.second || cache.first->second == -1) {
+ int &index(indices[U.Access]);
+ if (index >= parallel)
+ index = 0;
+ cache.first->second = index++;
+ }
+
+ ostringstream value;
+ value << U.Access << "::" << cache.first->second;
+ FullQueueName = value.str();
+ }
}
unsigned int Instances = 0, SchemaLength = AccessSchema.length();
void pkgAcquire::RunFds(fd_set *RSet,fd_set *WSet)
{
RunFdsSane(RSet, WSet);
-};
+}
/*}}}*/
// Acquire::RunFdsSane - Deal with active FDs /*{{{*/
// ---------------------------------------------------------------------
/* */
bool pkgAcquire::Queue::Enqueue(ItemDesc &Item)
{
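+ // OptimalI remembers the position where the new item has to be linked in
+ // to keep the queue sorted by priority (FIFO among equal priorities)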
+ QItem **OptimalI = &Items;
QItem **I = &Items;
// move to the end of the queue and check for duplicates here
- for (; *I != 0; I = &(*I)->Next)
+ for (; *I != 0; ) {
if (Item.URI == (*I)->URI)
{
if (_config->FindB("Debug::pkgAcquire::Worker",false) == true)
Item.Owner->Status = (*I)->Owner->Status;
return false;
}
+ // Determine the optimal position to insert: after everything with an
+ // equal or higher priority, so higher-priority items are fetched first
+ // and equal priorities keep their FIFO order.
+ int priority = (*I)->GetPriority();
+
+ I = &(*I)->Next;
+ if (priority >= Item.Owner->Priority()) {
+ OptimalI = I;
+ }
+ }
+
// Create a new item
QItem *Itm = new QItem;
*Itm = Item;
- Itm->Next = 0;
- *I = Itm;
+ Itm->Next = *OptimalI;
+ *OptimalI = Itm;
Item.Owner->QueueCounter++;
if (Items->Next == 0)
if (Workers == 0)
{
URI U(Name);
- pkgAcquire::MethodConfig *Cnf = Owner->GetConfig(U.Access);
- if (Cnf == 0)
+ pkgAcquire::MethodConfig * const Cnf = Owner->GetConfig(U.Access);
+ if (unlikely(Cnf == nullptr))
return false;
-
+
+ // now-running twin of the pkgAcquire::Enqueue call
+ for (QItem *I = Items; I != 0; )
+ {
+ auto const INext = I->Next;
+ for (auto &&O: I->Owners)
+ CheckForBadItemAndFailIt(O, Cnf, Owner->Log);
+ // if an item failed, it will be auto-dequeued, invalidating our I here
+ I = INext;
+ }
+
Workers = new Worker(this,Cnf,Owner->Log);
Owner->Add(Workers);
if (Workers->Start() == false)
// Look for a queable item
QItem *I = Items;
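+ // highest priority of the items currently being fetched, so that idle
+ // items of lower priority are not interleaved into the pipeline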
+ int ActivePriority = 0;
while (PipeDepth < (signed)MaxPipeDepth)
{
- for (; I != 0; I = I->Next)
+ for (; I != 0; I = I->Next) {
+ if (I->Owner->Status == pkgAcquire::Item::StatFetching)
+ ActivePriority = std::max(ActivePriority, I->GetPriority());
if (I->Owner->Status == pkgAcquire::Item::StatIdle)
break;
+ }
// Nothing to do, queue is idle.
if (I == 0)
return true;
+ // This item has a lower priority than stuff in the pipeline, pretend
+ // the queue is idle
+ if (I->GetPriority() < ActivePriority)
+ return true;
I->Worker = Workers;
for (auto const &O: I->Owners)
O->Status = pkgAcquire::Item::StatFetching;
return Maximum;
}
/*}}}*/
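+// A QItem can be shared by multiple owners requesting the same URI; its
+// effective priority is the highest priority among those owners.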
+APT_PURE int pkgAcquire::Queue::QItem::GetPriority() const /*{{{*/
+{
+ int Priority = 0;
+ for (auto const &O: Owners)
+ Priority = std::max(Priority, O->Priority());
+
+ return Priority;
+}
+ /*}}}*/
void pkgAcquire::Queue::QItem::SyncDestinationFiles() const /*{{{*/
{
/* ensure that the first owner has the best partial file of all and
CurrentBytes = 0;
TotalItems = 0;
CurrentItems = 0;
-
+
// Compute the total number of bytes to fetch
unsigned int Unknown = 0;
unsigned int Count = 0;
- bool UnfetchedReleaseFiles = false;
+ bool ExpectAdditionalItems = false;
for (pkgAcquire::ItemCIterator I = Owner->ItemsBegin();
I != Owner->ItemsEnd();
++I, ++Count)
TotalItems++;
if ((*I)->Status == pkgAcquire::Item::StatDone)
++CurrentItems;
-
- // Totally ignore local items
- if ((*I)->Local == true)
- continue;
-
- // see if the method tells us to expect more
- TotalItems += (*I)->ExpectedAdditionalItems;
- // check if there are unfetched Release files
- if ((*I)->Complete == false && (*I)->ExpectedAdditionalItems > 0)
- UnfetchedReleaseFiles = true;
+ // do we expect to acquire more files than we know of yet?
+ if ((*I)->ExpectedAdditionalItems > 0)
+ ExpectAdditionalItems = true;
TotalBytes += (*I)->FileSize;
if ((*I)->Complete == true)
if ((*I)->FileSize == 0 && (*I)->Complete == false)
++Unknown;
}
-
+
// Compute the current completion
unsigned long long ResumeSize = 0;
for (pkgAcquire::Worker *I = Owner->WorkersBegin(); I != 0;
if (CurrentBytes > TotalBytes)
CurrentBytes = TotalBytes;
- // debug
- if (_config->FindB("Debug::acquire::progress", false) == true)
- std::clog << " Bytes: "
- << SizeToStr(CurrentBytes) << " / " << SizeToStr(TotalBytes)
- << std::endl;
-
// Compute the CPS
struct timeval NewTime;
gettimeofday(&NewTime,0);
double const OldPercent = Percent;
// calculate the percentage, if we have too little data assume 1%
- if (TotalBytes > 0 && UnfetchedReleaseFiles)
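+ // as long as more items are expected the total amount of work is not
+ // known yet, so any percentage would be misleading; report zero instead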
+ if (ExpectAdditionalItems)
Percent = 0;
else
// use both files and bytes because bytes can be unreliable
Percent = (0.8 * (CurrentBytes/float(TotalBytes)*100.0) +
0.2 * (CurrentItems/float(TotalItems)*100.0));
+
+ // debug
+ if (_config->FindB("Debug::acquire::progress", false) == true)
+ {
+ std::clog
+ << "["
+ << std::setw(5) << std::setprecision(4) << std::showpoint << Percent
+ << "]"
+ << " Bytes: "
+ << SizeToStr(CurrentBytes) << " / " << SizeToStr(TotalBytes)
+ << " # Files: "
+ << CurrentItems << " / " << TotalItems
+ << std::endl;
+ }
+
double const DiffPercent = Percent - OldPercent;
if (DiffPercent < 0.001 && _config->FindB("Acquire::Progress::Diffpercent", false) == true)
return true;
snprintf(msg,sizeof(msg), _("Retrieving file %li of %li"), i, TotalItems);
// build the status str
- status << "dlstatus:" << i
- << ":" << std::setprecision(3) << Percent
- << ":" << msg
- << endl;
-
- std::string const dlstatus = status.str();
- FileFd::Write(fd, dlstatus.c_str(), dlstatus.size());
+ std::ostringstream str;
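+ // use the classic "C" locale so the decimal separator of Percent is
+ // always a dot, regardless of the user's locale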
+ str.imbue(std::locale::classic());
+ str.precision(4);
+ str << "dlstatus" << ':' << std::fixed << i << ':' << Percent << ':' << msg << '\n';
+ auto const dlstatus = str.str();
+ FileFd::Write(fd, dlstatus.data(), dlstatus.size());
}
return true;