Bug #807012 also involves package dependencies :/.
diff --git a/apt-pkg/acquire.cc b/apt-pkg/acquire.cc
index c65aef329b3d3123de076b24bf94e8ffb8aabc13..4ccfa26052a5a2488a072c143674801f393d7f96 100644
@@ -80,7 +80,7 @@ void pkgAcquire::Initialize()
    if (getuid() == 0 && SandboxUser.empty() == false && SandboxUser != "root") // if we aren't root, we can't chown, so don't try it
    {
       struct passwd const * const pw = getpwnam(SandboxUser.c_str());
-      struct group const * const gr = getgrnam("root");
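+      // ROOT_GROUP names the superuser's primary group, which is not
+      // called "root" on every platform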
+      struct group const * const gr = getgrnam(ROOT_GROUP);
       if (pw != NULL && gr != NULL)
       {
         std::string const AuthConf = _config->FindFile("Dir::Etc::netrc");
@@ -106,7 +106,7 @@ static bool SetupAPTPartialDirectory(std::string const &grand, std::string const
    if (getuid() == 0 && SandboxUser.empty() == false && SandboxUser != "root") // if we aren't root, we can't chown, so don't try it
    {
       struct passwd const * const pw = getpwnam(SandboxUser.c_str());
-      struct group const * const gr = getgrnam("root");
+      struct group const * const gr = getgrnam(ROOT_GROUP);
       if (pw != NULL && gr != NULL)
       {
          // chown the partial dir
@@ -269,6 +269,42 @@ void pkgAcquire::Remove(Worker *Work)
    it is constructed which creates a queue (based on the current queue
    mode) and puts the item in that queue. If the system is running then
    the queue might be started. */
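+/* Fail an item before it is handed to a worker if it can never succeed:
+   either it is caught in a redirection loop, or it requires hashes but the
+   expected hash list is unusable (missing or too weak) and Acquire::ForceHash
+   is not set. Returns true if the item was failed. */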
+static bool CheckForBadItemAndFailIt(pkgAcquire::Item * const Item,
+      pkgAcquire::MethodConfig const * const Config, pkgAcquireStatus * const Log)
+{
+   auto SavedDesc = Item->GetItemDesc();
+   if (Item->IsRedirectionLoop(SavedDesc.URI))
+   {
+      std::string const Message = "400 URI Failure"
+        "\nURI: " + SavedDesc.URI +
+        "\nFilename: " + Item->DestFile +
+        "\nFailReason: RedirectionLoop";
+
+      Item->Status = pkgAcquire::Item::StatError;
+      Item->Failed(Message, Config);
+      if (Log != nullptr)
+        Log->Fail(SavedDesc);
+      return true;
+   }
+
+   HashStringList const hsl = Item->GetExpectedHashes();
+   if (hsl.usable() == false && Item->HashesRequired() &&
+        _config->Exists("Acquire::ForceHash") == false)
+   {
+      std::string const Message = "400 URI Failure"
+        "\nURI: " + SavedDesc.URI +
+        "\nFilename: " + Item->DestFile +
+        "\nFailReason: WeakHashSums";
+
+      Item->Status = pkgAcquire::Item::StatAuthError;
+      Item->Failed(Message, Config);
+      if (Log != nullptr)
+        Log->Fail(SavedDesc);
+      return true;
+   }
+   return false;
+}
 void pkgAcquire::Enqueue(ItemDesc &Item)
 {
    // Determine which queue to put the item in
@@ -277,6 +313,13 @@ void pkgAcquire::Enqueue(ItemDesc &Item)
    if (Name.empty() == true)
       return;
 
+   /* The Running check keeps us from reporting failures to the log
+      before the download has actually started; failing right away would
+      be simpler to implement, but it would confuse users and frontends,
+      so items skipped here are checked again in Queue::Startup. */
+   if (Running && CheckForBadItemAndFailIt(Item.Owner, Config, Log))
+      return;
+
    // Find the queue structure
    Queue *I = Queues;
    for (; I != 0 && I->Name != Name; I = I->Next);
@@ -386,6 +429,24 @@ string pkgAcquire::QueueName(string Uri,MethodConfig const *&Config)
    } else
    {
       FullQueueName = AccessSchema + U.Host;
+
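+      // Rather than one queue per host, spread hosts round-robin over at
+      // most Acquire::<method>::MaxParallel queues for this method
+      // (default 8; a value of 0 keeps the per-host queues).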
+      int parallel(_config->FindI("Acquire::"+U.Access+"::MaxParallel",8));
+      if (parallel > 0) {
+        typedef map<string, int> indexmap;
+        static indexmap indices;
+
+        pair<indexmap::iterator, bool> cache(indices.insert(indexmap::value_type(FullQueueName, -1)));
+        if (cache.second || cache.first->second == -1) {
+           int &index(indices[U.Access]);
+           if (index >= parallel)
+              index = 0;
+           cache.first->second = index++;
+        }
+
+        ostringstream value;
+        value << U.Access << "::" << cache.first->second;
+        FullQueueName = value.str();
+      }
    }
    unsigned int Instances = 0, SchemaLength = AccessSchema.length();
 
@@ -466,7 +527,7 @@ void pkgAcquire::SetFds(int &Fd,fd_set *RSet,fd_set *WSet)
 void pkgAcquire::RunFds(fd_set *RSet,fd_set *WSet)
 {
    RunFdsSane(RSet, WSet);
-};
+}
                                                                        /*}}}*/
 // Acquire::RunFdsSane - Deal with active FDs                          /*{{{*/
 // ---------------------------------------------------------------------
@@ -851,9 +912,10 @@ pkgAcquire::Queue::~Queue()
 /* */
 bool pkgAcquire::Queue::Enqueue(ItemDesc &Item)
 {
+   QItem **OptimalI = &Items;
    QItem **I = &Items;
    // move to the end of the queue and check for duplicates here
-   for (; *I != 0; I = &(*I)->Next)
+   for (; *I != 0; ) {
       if (Item.URI == (*I)->URI)
       {
         if (_config->FindB("Debug::pkgAcquire::Worker",false) == true)
@@ -862,12 +924,22 @@ bool pkgAcquire::Queue::Enqueue(ItemDesc &Item)
         Item.Owner->Status = (*I)->Owner->Status;
         return false;
       }
+      // Determine the optimal insertion position: after the last item with
+      // the same or a higher priority, so higher priorities stay in front.
+      int priority = (*I)->GetPriority();
+
+      I = &(*I)->Next;
+      if (priority >= Item.Owner->Priority()) {
+        OptimalI = I;
+      }
+   }
 
    // Create a new item
    QItem *Itm = new QItem;
    *Itm = Item;
-   Itm->Next = 0;
-   *I = Itm;
+   Itm->Next = *OptimalI;
+   *OptimalI = Itm;
    
    Item.Owner->QueueCounter++;   
    if (Items->Next == 0)
@@ -912,10 +984,20 @@ bool pkgAcquire::Queue::Startup()
    if (Workers == 0)
    {
       URI U(Name);
-      pkgAcquire::MethodConfig *Cnf = Owner->GetConfig(U.Access);
-      if (Cnf == 0)
+      pkgAcquire::MethodConfig * const Cnf = Owner->GetConfig(U.Access);
+      if (unlikely(Cnf == nullptr))
         return false;
-      
+
+      // now-running twin of the pkgAcquire::Enqueue call
+      for (QItem *I = Items; I != 0; )
+      {
+        auto const INext = I->Next;
+        for (auto &&O: I->Owners)
+           CheckForBadItemAndFailIt(O, Cnf, Owner->Log);
+        // if an item failed, it will be auto-dequeued, invalidating our I here
+        I = INext;
+      }
+
       Workers = new Worker(this,Cnf,Owner->Log);
       Owner->Add(Workers);
       if (Workers->Start() == false)
@@ -1007,16 +1089,24 @@ bool pkgAcquire::Queue::Cycle()
 
    // Look for a queable item
    QItem *I = Items;
+   int ActivePriority = 0;
    while (PipeDepth < (signed)MaxPipeDepth)
    {
-      for (; I != 0; I = I->Next)
+      for (; I != 0; I = I->Next) {
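+        // remember the highest priority currently being fetched so that
+        // lower-priority idle items are not started while it is in flight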
+        if (I->Owner->Status == pkgAcquire::Item::StatFetching)
+           ActivePriority = std::max(ActivePriority, I->GetPriority());
         if (I->Owner->Status == pkgAcquire::Item::StatIdle)
            break;
+      }
 
       // Nothing to do, queue is idle.
       if (I == 0)
         return true;
 
+      // This item has a lower priority than stuff in the pipeline, pretend
+      // the queue is idle
+      if (I->GetPriority() < ActivePriority)
+        return true;
       I->Worker = Workers;
       for (auto const &O: I->Owners)
         O->Status = pkgAcquire::Item::StatFetching;
@@ -1082,6 +1172,15 @@ APT_PURE unsigned long long pkgAcquire::Queue::QItem::GetMaximumSize() const     /*{
    return Maximum;
 }
                                                                        /*}}}*/
+APT_PURE int pkgAcquire::Queue::QItem::GetPriority() const             /*{{{*/
+{
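+   // an item may be wanted by several owners; it runs at the highest
+   // priority among them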
+   int Priority = 0;
+   for (auto const &O: Owners)
+      Priority = std::max(Priority, O->Priority());
+
+   return Priority;
+}
+                                                                       /*}}}*/
 void pkgAcquire::Queue::QItem::SyncDestinationFiles() const            /*{{{*/
 {
    /* ensure that the first owner has the best partial file of all and
@@ -1147,7 +1246,7 @@ bool pkgAcquireStatus::Pulse(pkgAcquire *Owner)
    // Compute the total number of bytes to fetch
    unsigned int Unknown = 0;
    unsigned int Count = 0;
-   bool UnfetchedReleaseFiles = false;
+   bool ExpectAdditionalItems = false;
    for (pkgAcquire::ItemCIterator I = Owner->ItemsBegin(); 
         I != Owner->ItemsEnd();
        ++I, ++Count)
@@ -1156,12 +1255,9 @@ bool pkgAcquireStatus::Pulse(pkgAcquire *Owner)
       if ((*I)->Status == pkgAcquire::Item::StatDone)
         ++CurrentItems;
 
-      // see if the method tells us to expect more
-      TotalItems += (*I)->ExpectedAdditionalItems;
-
-      // check if there are unfetched Release files
-      if ((*I)->Status != pkgAcquire::Item::StatDone && (*I)->ExpectedAdditionalItems > 0)
-         UnfetchedReleaseFiles = true;
+      // do we expect to acquire more files than we know of yet?
+      if ((*I)->ExpectedAdditionalItems > 0)
+         ExpectAdditionalItems = true;
 
       TotalBytes += (*I)->FileSize;
       if ((*I)->Complete == true)
@@ -1218,7 +1314,7 @@ bool pkgAcquireStatus::Pulse(pkgAcquire *Owner)
 
    double const OldPercent = Percent;
    // calculate the percentage, if we have too little data assume 1%
-   if (TotalBytes > 0 && UnfetchedReleaseFiles)
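+   // while the fetcher still expects additional items the totals are
+   // incomplete, so no meaningful percentage can be calculated yet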
+   if (ExpectAdditionalItems)
       Percent = 0;
    else
       // use both files and bytes because bytes can be unreliable
@@ -1261,13 +1357,12 @@ bool pkgAcquireStatus::Pulse(pkgAcquire *Owner)
         snprintf(msg,sizeof(msg), _("Retrieving file %li of %li"), i, TotalItems);
 
       // build the status str
-      status << "dlstatus:" << i
-             << ":"  << std::setprecision(3) << Percent
-             << ":" << msg
-             << endl;
-
-      std::string const dlstatus = status.str();
-      FileFd::Write(fd, dlstatus.c_str(), dlstatus.size());
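+      // use the classic C locale and fixed notation so the progress line
+      // is machine-parseable regardless of the user's locale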
+      std::ostringstream str;
+      str.imbue(std::locale::classic());
+      str.precision(4);
+      str << "dlstatus" << ':' << std::fixed << i << ':' << Percent << ':' << msg << '\n';
+      auto const dlstatus = str.str();
+      FileFd::Write(fd, dlstatus.data(), dlstatus.size());
    }
 
    return true;